kernel: Update to 3.3.6.
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 0c083c5..bf13011 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37@@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41+PERF*
42 SCCS
43 System.map*
44 TAGS
45@@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49+builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55+clut_vga16.c
56+common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63+config.c
64 config.mak
65 config.mak.autogen
66+config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70@@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74+dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78+exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82@@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86+gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93+hash
94+hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98@@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102-kconfig
103+kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107@@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111-linux
112+lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116@@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120-media
121 mconf
122+mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129+mkpiggy
130 mkprep
131 mkregtable
132 mktables
133@@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137+regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141@@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161+vmlinux.bin.bz2
162 vmlinux.lds
163+vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167@@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zconf.lex.c
178 zoffset.h
179diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180index d99fd9c..8689fef 100644
181--- a/Documentation/kernel-parameters.txt
182+++ b/Documentation/kernel-parameters.txt
183@@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188+ virtualization environments that don't cope well with the
189+ expand down segment used by UDEREF on X86-32 or the frequent
190+ page table updates on X86-64.
191+
192+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193+
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197diff --git a/Makefile b/Makefile
198index 9cd6941..92e68ff 100644
199--- a/Makefile
200+++ b/Makefile
201@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206-HOSTCXXFLAGS = -O2
207+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
209+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
214 # Rules shared between *config targets and build targets
215
216 # Basic helpers built in scripts/
217-PHONY += scripts_basic
218-scripts_basic:
219+PHONY += scripts_basic gcc-plugins
220+scripts_basic: gcc-plugins
221 $(Q)$(MAKE) $(build)=scripts/basic
222 $(Q)rm -f .tmp_quiet_recordmcount
223
224@@ -564,6 +565,55 @@ else
225 KBUILD_CFLAGS += -O2
226 endif
227
228+ifndef DISABLE_PAX_PLUGINS
229+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
230+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
231+ifndef CONFIG_UML
232+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
233+endif
234+endif
235+ifdef CONFIG_PAX_MEMORY_STACKLEAK
236+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
237+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
238+endif
239+ifdef CONFIG_KALLOCSTAT_PLUGIN
240+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
241+endif
242+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
243+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
244+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
245+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
246+endif
247+ifdef CONFIG_CHECKER_PLUGIN
248+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
249+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
250+endif
251+endif
252+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
253+ifdef CONFIG_PAX_SIZE_OVERFLOW
254+SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
255+endif
256+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
257+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
258+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
259+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
260+ifeq ($(KBUILD_EXTMOD),)
261+gcc-plugins:
262+ $(Q)$(MAKE) $(build)=tools/gcc
263+else
264+gcc-plugins: ;
265+endif
266+else
267+gcc-plugins:
268+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
269+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
270+else
271+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
272+endif
273+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
274+endif
275+endif
276+
277 include $(srctree)/arch/$(SRCARCH)/Makefile
278
279 ifneq ($(CONFIG_FRAME_WARN),0)
280@@ -708,7 +758,7 @@ export mod_strip_cmd
281
282
283 ifeq ($(KBUILD_EXTMOD),)
284-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
285+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
286
287 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
288 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
289@@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
290
291 # The actual objects are generated when descending,
292 # make sure no implicit rule kicks in
293+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
294+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
295 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
296
297 # Handle descending into subdirectories listed in $(vmlinux-dirs)
298@@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
299 # Error messages still appears in the original language
300
301 PHONY += $(vmlinux-dirs)
302-$(vmlinux-dirs): prepare scripts
303+$(vmlinux-dirs): gcc-plugins prepare scripts
304 $(Q)$(MAKE) $(build)=$@
305
306 # Store (new) KERNELRELASE string in include/config/kernel.release
307@@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
308 $(Q)$(MAKE) $(build)=.
309
310 # All the preparing..
311+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
312 prepare: prepare0
313
314 # Generate some files
315@@ -1089,6 +1142,8 @@ all: modules
316 # using awk while concatenating to the final file.
317
318 PHONY += modules
319+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
320+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
321 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
322 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
323 @$(kecho) ' Building modules, stage 2.';
324@@ -1104,7 +1159,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
325
326 # Target to prepare building external modules
327 PHONY += modules_prepare
328-modules_prepare: prepare scripts
329+modules_prepare: gcc-plugins prepare scripts
330
331 # Target to install modules
332 PHONY += modules_install
333@@ -1201,6 +1256,7 @@ distclean: mrproper
334 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
335 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
336 -o -name '.*.rej' \
337+ -o -name '.*.rej' -o -name '*.so' \
338 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
339 -type f -print | xargs rm -f
340
341@@ -1361,6 +1417,8 @@ PHONY += $(module-dirs) modules
342 $(module-dirs): crmodverdir $(objtree)/Module.symvers
343 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
344
345+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
346+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
347 modules: $(module-dirs)
348 @$(kecho) ' Building modules, stage 2.';
349 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
350@@ -1487,17 +1545,21 @@ else
351 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
352 endif
353
354-%.s: %.c prepare scripts FORCE
355+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357+%.s: %.c gcc-plugins prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 %.i: %.c prepare scripts FORCE
360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
361-%.o: %.c prepare scripts FORCE
362+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
363+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
364+%.o: %.c gcc-plugins prepare scripts FORCE
365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
366 %.lst: %.c prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368-%.s: %.S prepare scripts FORCE
369+%.s: %.S gcc-plugins prepare scripts FORCE
370 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
371-%.o: %.S prepare scripts FORCE
372+%.o: %.S gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.symtypes: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376@@ -1507,11 +1569,15 @@ endif
377 $(cmd_crmodverdir)
378 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
379 $(build)=$(build-dir)
380-%/: prepare scripts FORCE
381+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383+%/: gcc-plugins prepare scripts FORCE
384 $(cmd_crmodverdir)
385 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
386 $(build)=$(build-dir)
387-%.ko: prepare scripts FORCE
388+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390+%.ko: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir) $(@:.ko=.o)
394diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
395index 640f909..48b6597 100644
396--- a/arch/alpha/include/asm/atomic.h
397+++ b/arch/alpha/include/asm/atomic.h
398@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
399 #define atomic_dec(v) atomic_sub(1,(v))
400 #define atomic64_dec(v) atomic64_sub(1,(v))
401
402+#define atomic64_read_unchecked(v) atomic64_read(v)
403+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
404+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
405+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
406+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
407+#define atomic64_inc_unchecked(v) atomic64_inc(v)
408+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
409+#define atomic64_dec_unchecked(v) atomic64_dec(v)
410+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
411+
412 #define smp_mb__before_atomic_dec() smp_mb()
413 #define smp_mb__after_atomic_dec() smp_mb()
414 #define smp_mb__before_atomic_inc() smp_mb()
415diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
416index ad368a9..fbe0f25 100644
417--- a/arch/alpha/include/asm/cache.h
418+++ b/arch/alpha/include/asm/cache.h
419@@ -4,19 +4,19 @@
420 #ifndef __ARCH_ALPHA_CACHE_H
421 #define __ARCH_ALPHA_CACHE_H
422
423+#include <linux/const.h>
424
425 /* Bytes per L1 (data) cache line. */
426 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
427-# define L1_CACHE_BYTES 64
428 # define L1_CACHE_SHIFT 6
429 #else
430 /* Both EV4 and EV5 are write-through, read-allocate,
431 direct-mapped, physical.
432 */
433-# define L1_CACHE_BYTES 32
434 # define L1_CACHE_SHIFT 5
435 #endif
436
437+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
438 #define SMP_CACHE_BYTES L1_CACHE_BYTES
439
440 #endif
441diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
442index da5449e..7418343 100644
443--- a/arch/alpha/include/asm/elf.h
444+++ b/arch/alpha/include/asm/elf.h
445@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
446
447 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
448
449+#ifdef CONFIG_PAX_ASLR
450+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
451+
452+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
453+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
454+#endif
455+
456 /* $0 is set by ld.so to a pointer to a function which might be
457 registered using atexit. This provides a mean for the dynamic
458 linker to call DT_FINI functions for shared libraries that have
459diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
460index bc2a0da..8ad11ee 100644
461--- a/arch/alpha/include/asm/pgalloc.h
462+++ b/arch/alpha/include/asm/pgalloc.h
463@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
464 pgd_set(pgd, pmd);
465 }
466
467+static inline void
468+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
469+{
470+ pgd_populate(mm, pgd, pmd);
471+}
472+
473 extern pgd_t *pgd_alloc(struct mm_struct *mm);
474
475 static inline void
476diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
477index de98a73..bd4f1f8 100644
478--- a/arch/alpha/include/asm/pgtable.h
479+++ b/arch/alpha/include/asm/pgtable.h
480@@ -101,6 +101,17 @@ struct vm_area_struct;
481 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
482 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
483 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
484+
485+#ifdef CONFIG_PAX_PAGEEXEC
486+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
487+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
488+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
489+#else
490+# define PAGE_SHARED_NOEXEC PAGE_SHARED
491+# define PAGE_COPY_NOEXEC PAGE_COPY
492+# define PAGE_READONLY_NOEXEC PAGE_READONLY
493+#endif
494+
495 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
496
497 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
498diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
499index 2fd00b7..cfd5069 100644
500--- a/arch/alpha/kernel/module.c
501+++ b/arch/alpha/kernel/module.c
502@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
503
504 /* The small sections were sorted to the end of the segment.
505 The following should definitely cover them. */
506- gp = (u64)me->module_core + me->core_size - 0x8000;
507+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
508 got = sechdrs[me->arch.gotsecindex].sh_addr;
509
510 for (i = 0; i < n; i++) {
511diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
512index 01e8715..be0e80f 100644
513--- a/arch/alpha/kernel/osf_sys.c
514+++ b/arch/alpha/kernel/osf_sys.c
515@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
516 /* At this point: (!vma || addr < vma->vm_end). */
517 if (limit - len < addr)
518 return -ENOMEM;
519- if (!vma || addr + len <= vma->vm_start)
520+ if (check_heap_stack_gap(vma, addr, len))
521 return addr;
522 addr = vma->vm_end;
523 vma = vma->vm_next;
524@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
525 merely specific addresses, but regions of memory -- perhaps
526 this feature should be incorporated into all ports? */
527
528+#ifdef CONFIG_PAX_RANDMMAP
529+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
530+#endif
531+
532 if (addr) {
533 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
534 if (addr != (unsigned long) -ENOMEM)
535@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
536 }
537
538 /* Next, try allocating at TASK_UNMAPPED_BASE. */
539- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
540- len, limit);
541+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
542+
543 if (addr != (unsigned long) -ENOMEM)
544 return addr;
545
546diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
547index fadd5f8..904e73a 100644
548--- a/arch/alpha/mm/fault.c
549+++ b/arch/alpha/mm/fault.c
550@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
551 __reload_thread(pcb);
552 }
553
554+#ifdef CONFIG_PAX_PAGEEXEC
555+/*
556+ * PaX: decide what to do with offenders (regs->pc = fault address)
557+ *
558+ * returns 1 when task should be killed
559+ * 2 when patched PLT trampoline was detected
560+ * 3 when unpatched PLT trampoline was detected
561+ */
562+static int pax_handle_fetch_fault(struct pt_regs *regs)
563+{
564+
565+#ifdef CONFIG_PAX_EMUPLT
566+ int err;
567+
568+ do { /* PaX: patched PLT emulation #1 */
569+ unsigned int ldah, ldq, jmp;
570+
571+ err = get_user(ldah, (unsigned int *)regs->pc);
572+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
573+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
574+
575+ if (err)
576+ break;
577+
578+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
579+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
580+ jmp == 0x6BFB0000U)
581+ {
582+ unsigned long r27, addr;
583+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
584+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
585+
586+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
587+ err = get_user(r27, (unsigned long *)addr);
588+ if (err)
589+ break;
590+
591+ regs->r27 = r27;
592+ regs->pc = r27;
593+ return 2;
594+ }
595+ } while (0);
596+
597+ do { /* PaX: patched PLT emulation #2 */
598+ unsigned int ldah, lda, br;
599+
600+ err = get_user(ldah, (unsigned int *)regs->pc);
601+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
602+ err |= get_user(br, (unsigned int *)(regs->pc+8));
603+
604+ if (err)
605+ break;
606+
607+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
608+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
609+ (br & 0xFFE00000U) == 0xC3E00000U)
610+ {
611+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
612+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
613+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
614+
615+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
616+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
617+ return 2;
618+ }
619+ } while (0);
620+
621+ do { /* PaX: unpatched PLT emulation */
622+ unsigned int br;
623+
624+ err = get_user(br, (unsigned int *)regs->pc);
625+
626+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
627+ unsigned int br2, ldq, nop, jmp;
628+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
629+
630+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
631+ err = get_user(br2, (unsigned int *)addr);
632+ err |= get_user(ldq, (unsigned int *)(addr+4));
633+ err |= get_user(nop, (unsigned int *)(addr+8));
634+ err |= get_user(jmp, (unsigned int *)(addr+12));
635+ err |= get_user(resolver, (unsigned long *)(addr+16));
636+
637+ if (err)
638+ break;
639+
640+ if (br2 == 0xC3600000U &&
641+ ldq == 0xA77B000CU &&
642+ nop == 0x47FF041FU &&
643+ jmp == 0x6B7B0000U)
644+ {
645+ regs->r28 = regs->pc+4;
646+ regs->r27 = addr+16;
647+ regs->pc = resolver;
648+ return 3;
649+ }
650+ }
651+ } while (0);
652+#endif
653+
654+ return 1;
655+}
656+
657+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
658+{
659+ unsigned long i;
660+
661+ printk(KERN_ERR "PAX: bytes at PC: ");
662+ for (i = 0; i < 5; i++) {
663+ unsigned int c;
664+ if (get_user(c, (unsigned int *)pc+i))
665+ printk(KERN_CONT "???????? ");
666+ else
667+ printk(KERN_CONT "%08x ", c);
668+ }
669+ printk("\n");
670+}
671+#endif
672
673 /*
674 * This routine handles page faults. It determines the address,
675@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
676 good_area:
677 si_code = SEGV_ACCERR;
678 if (cause < 0) {
679- if (!(vma->vm_flags & VM_EXEC))
680+ if (!(vma->vm_flags & VM_EXEC)) {
681+
682+#ifdef CONFIG_PAX_PAGEEXEC
683+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
684+ goto bad_area;
685+
686+ up_read(&mm->mmap_sem);
687+ switch (pax_handle_fetch_fault(regs)) {
688+
689+#ifdef CONFIG_PAX_EMUPLT
690+ case 2:
691+ case 3:
692+ return;
693+#endif
694+
695+ }
696+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
697+ do_group_exit(SIGKILL);
698+#else
699 goto bad_area;
700+#endif
701+
702+ }
703 } else if (!cause) {
704 /* Allow reads even for write-only mappings */
705 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
706diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
707index 86976d0..8e07f84 100644
708--- a/arch/arm/include/asm/atomic.h
709+++ b/arch/arm/include/asm/atomic.h
710@@ -15,6 +15,10 @@
711 #include <linux/types.h>
712 #include <asm/system.h>
713
714+#ifdef CONFIG_GENERIC_ATOMIC64
715+#include <asm-generic/atomic64.h>
716+#endif
717+
718 #define ATOMIC_INIT(i) { (i) }
719
720 #ifdef __KERNEL__
721@@ -25,7 +29,15 @@
722 * atomic_set() is the clrex or dummy strex done on every exception return.
723 */
724 #define atomic_read(v) (*(volatile int *)&(v)->counter)
725+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
726+{
727+ return v->counter;
728+}
729 #define atomic_set(v,i) (((v)->counter) = (i))
730+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
731+{
732+ v->counter = i;
733+}
734
735 #if __LINUX_ARM_ARCH__ >= 6
736
737@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
738 int result;
739
740 __asm__ __volatile__("@ atomic_add\n"
741+"1: ldrex %1, [%3]\n"
742+" adds %0, %1, %4\n"
743+
744+#ifdef CONFIG_PAX_REFCOUNT
745+" bvc 3f\n"
746+"2: bkpt 0xf103\n"
747+"3:\n"
748+#endif
749+
750+" strex %1, %0, [%3]\n"
751+" teq %1, #0\n"
752+" bne 1b"
753+
754+#ifdef CONFIG_PAX_REFCOUNT
755+"\n4:\n"
756+ _ASM_EXTABLE(2b, 4b)
757+#endif
758+
759+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
760+ : "r" (&v->counter), "Ir" (i)
761+ : "cc");
762+}
763+
764+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
765+{
766+ unsigned long tmp;
767+ int result;
768+
769+ __asm__ __volatile__("@ atomic_add_unchecked\n"
770 "1: ldrex %0, [%3]\n"
771 " add %0, %0, %4\n"
772 " strex %1, %0, [%3]\n"
773@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
774 smp_mb();
775
776 __asm__ __volatile__("@ atomic_add_return\n"
777+"1: ldrex %1, [%3]\n"
778+" adds %0, %1, %4\n"
779+
780+#ifdef CONFIG_PAX_REFCOUNT
781+" bvc 3f\n"
782+" mov %0, %1\n"
783+"2: bkpt 0xf103\n"
784+"3:\n"
785+#endif
786+
787+" strex %1, %0, [%3]\n"
788+" teq %1, #0\n"
789+" bne 1b"
790+
791+#ifdef CONFIG_PAX_REFCOUNT
792+"\n4:\n"
793+ _ASM_EXTABLE(2b, 4b)
794+#endif
795+
796+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
797+ : "r" (&v->counter), "Ir" (i)
798+ : "cc");
799+
800+ smp_mb();
801+
802+ return result;
803+}
804+
805+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
806+{
807+ unsigned long tmp;
808+ int result;
809+
810+ smp_mb();
811+
812+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
813 "1: ldrex %0, [%3]\n"
814 " add %0, %0, %4\n"
815 " strex %1, %0, [%3]\n"
816@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
817 int result;
818
819 __asm__ __volatile__("@ atomic_sub\n"
820+"1: ldrex %1, [%3]\n"
821+" subs %0, %1, %4\n"
822+
823+#ifdef CONFIG_PAX_REFCOUNT
824+" bvc 3f\n"
825+"2: bkpt 0xf103\n"
826+"3:\n"
827+#endif
828+
829+" strex %1, %0, [%3]\n"
830+" teq %1, #0\n"
831+" bne 1b"
832+
833+#ifdef CONFIG_PAX_REFCOUNT
834+"\n4:\n"
835+ _ASM_EXTABLE(2b, 4b)
836+#endif
837+
838+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839+ : "r" (&v->counter), "Ir" (i)
840+ : "cc");
841+}
842+
843+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
844+{
845+ unsigned long tmp;
846+ int result;
847+
848+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
849 "1: ldrex %0, [%3]\n"
850 " sub %0, %0, %4\n"
851 " strex %1, %0, [%3]\n"
852@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
853 smp_mb();
854
855 __asm__ __volatile__("@ atomic_sub_return\n"
856-"1: ldrex %0, [%3]\n"
857-" sub %0, %0, %4\n"
858+"1: ldrex %1, [%3]\n"
859+" sub %0, %1, %4\n"
860+
861+#ifdef CONFIG_PAX_REFCOUNT
862+" bvc 3f\n"
863+" mov %0, %1\n"
864+"2: bkpt 0xf103\n"
865+"3:\n"
866+#endif
867+
868 " strex %1, %0, [%3]\n"
869 " teq %1, #0\n"
870 " bne 1b"
871+
872+#ifdef CONFIG_PAX_REFCOUNT
873+"\n4:\n"
874+ _ASM_EXTABLE(2b, 4b)
875+#endif
876+
877 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
878 : "r" (&v->counter), "Ir" (i)
879 : "cc");
880@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
881 return oldval;
882 }
883
884+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
885+{
886+ unsigned long oldval, res;
887+
888+ smp_mb();
889+
890+ do {
891+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
892+ "ldrex %1, [%3]\n"
893+ "mov %0, #0\n"
894+ "teq %1, %4\n"
895+ "strexeq %0, %5, [%3]\n"
896+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
897+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
898+ : "cc");
899+ } while (res);
900+
901+ smp_mb();
902+
903+ return oldval;
904+}
905+
906 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
907 {
908 unsigned long tmp, tmp2;
909@@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
910
911 return val;
912 }
913+#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
914 #define atomic_add(i, v) (void) atomic_add_return(i, v)
915+#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
916
917 static inline int atomic_sub_return(int i, atomic_t *v)
918 {
919@@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
920
921 return val;
922 }
923+#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
924 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
925+#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
926
927 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
928 {
929@@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
930
931 return ret;
932 }
933+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
934
935 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
936 {
937@@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
938 #endif /* __LINUX_ARM_ARCH__ */
939
940 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
941+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
942+{
943+ return xchg(&v->counter, new);
944+}
945
946 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
947 {
948@@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
949 }
950
951 #define atomic_inc(v) atomic_add(1, v)
952+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
953+{
954+ atomic_add_unchecked(1, v);
955+}
956 #define atomic_dec(v) atomic_sub(1, v)
957+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
958+{
959+ atomic_sub_unchecked(1, v);
960+}
961
962 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
963+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
964+{
965+ return atomic_add_return_unchecked(1, v) == 0;
966+}
967 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
968 #define atomic_inc_return(v) (atomic_add_return(1, v))
969+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
970+{
971+ return atomic_add_return_unchecked(1, v);
972+}
973 #define atomic_dec_return(v) (atomic_sub_return(1, v))
974 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
975
976@@ -239,6 +406,14 @@ typedef struct {
977 u64 __aligned(8) counter;
978 } atomic64_t;
979
980+#ifdef CONFIG_PAX_REFCOUNT
981+typedef struct {
982+ u64 __aligned(8) counter;
983+} atomic64_unchecked_t;
984+#else
985+typedef atomic64_t atomic64_unchecked_t;
986+#endif
987+
988 #define ATOMIC64_INIT(i) { (i) }
989
990 static inline u64 atomic64_read(atomic64_t *v)
991@@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
992 return result;
993 }
994
995+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
996+{
997+ u64 result;
998+
999+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1000+" ldrexd %0, %H0, [%1]"
1001+ : "=&r" (result)
1002+ : "r" (&v->counter), "Qo" (v->counter)
1003+ );
1004+
1005+ return result;
1006+}
1007+
1008 static inline void atomic64_set(atomic64_t *v, u64 i)
1009 {
1010 u64 tmp;
1011@@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1012 : "cc");
1013 }
1014
1015+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1016+{
1017+ u64 tmp;
1018+
1019+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1020+"1: ldrexd %0, %H0, [%2]\n"
1021+" strexd %0, %3, %H3, [%2]\n"
1022+" teq %0, #0\n"
1023+" bne 1b"
1024+ : "=&r" (tmp), "=Qo" (v->counter)
1025+ : "r" (&v->counter), "r" (i)
1026+ : "cc");
1027+}
1028+
1029 static inline void atomic64_add(u64 i, atomic64_t *v)
1030 {
1031 u64 result;
1032@@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1033 __asm__ __volatile__("@ atomic64_add\n"
1034 "1: ldrexd %0, %H0, [%3]\n"
1035 " adds %0, %0, %4\n"
1036+" adcs %H0, %H0, %H4\n"
1037+
1038+#ifdef CONFIG_PAX_REFCOUNT
1039+" bvc 3f\n"
1040+"2: bkpt 0xf103\n"
1041+"3:\n"
1042+#endif
1043+
1044+" strexd %1, %0, %H0, [%3]\n"
1045+" teq %1, #0\n"
1046+" bne 1b"
1047+
1048+#ifdef CONFIG_PAX_REFCOUNT
1049+"\n4:\n"
1050+ _ASM_EXTABLE(2b, 4b)
1051+#endif
1052+
1053+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1054+ : "r" (&v->counter), "r" (i)
1055+ : "cc");
1056+}
1057+
1058+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1059+{
1060+ u64 result;
1061+ unsigned long tmp;
1062+
1063+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1064+"1: ldrexd %0, %H0, [%3]\n"
1065+" adds %0, %0, %4\n"
1066 " adc %H0, %H0, %H4\n"
1067 " strexd %1, %0, %H0, [%3]\n"
1068 " teq %1, #0\n"
1069@@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1070
1071 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1072 {
1073- u64 result;
1074- unsigned long tmp;
1075+ u64 result, tmp;
1076
1077 smp_mb();
1078
1079 __asm__ __volatile__("@ atomic64_add_return\n"
1080+"1: ldrexd %1, %H1, [%3]\n"
1081+" adds %0, %1, %4\n"
1082+" adcs %H0, %H1, %H4\n"
1083+
1084+#ifdef CONFIG_PAX_REFCOUNT
1085+" bvc 3f\n"
1086+" mov %0, %1\n"
1087+" mov %H0, %H1\n"
1088+"2: bkpt 0xf103\n"
1089+"3:\n"
1090+#endif
1091+
1092+" strexd %1, %0, %H0, [%3]\n"
1093+" teq %1, #0\n"
1094+" bne 1b"
1095+
1096+#ifdef CONFIG_PAX_REFCOUNT
1097+"\n4:\n"
1098+ _ASM_EXTABLE(2b, 4b)
1099+#endif
1100+
1101+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1102+ : "r" (&v->counter), "r" (i)
1103+ : "cc");
1104+
1105+ smp_mb();
1106+
1107+ return result;
1108+}
1109+
1110+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1111+{
1112+ u64 result;
1113+ unsigned long tmp;
1114+
1115+ smp_mb();
1116+
1117+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1118 "1: ldrexd %0, %H0, [%3]\n"
1119 " adds %0, %0, %4\n"
1120 " adc %H0, %H0, %H4\n"
1121@@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1122 __asm__ __volatile__("@ atomic64_sub\n"
1123 "1: ldrexd %0, %H0, [%3]\n"
1124 " subs %0, %0, %4\n"
1125+" sbcs %H0, %H0, %H4\n"
1126+
1127+#ifdef CONFIG_PAX_REFCOUNT
1128+" bvc 3f\n"
1129+"2: bkpt 0xf103\n"
1130+"3:\n"
1131+#endif
1132+
1133+" strexd %1, %0, %H0, [%3]\n"
1134+" teq %1, #0\n"
1135+" bne 1b"
1136+
1137+#ifdef CONFIG_PAX_REFCOUNT
1138+"\n4:\n"
1139+ _ASM_EXTABLE(2b, 4b)
1140+#endif
1141+
1142+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1143+ : "r" (&v->counter), "r" (i)
1144+ : "cc");
1145+}
1146+
1147+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1148+{
1149+ u64 result;
1150+ unsigned long tmp;
1151+
1152+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1153+"1: ldrexd %0, %H0, [%3]\n"
1154+" subs %0, %0, %4\n"
1155 " sbc %H0, %H0, %H4\n"
1156 " strexd %1, %0, %H0, [%3]\n"
1157 " teq %1, #0\n"
1158@@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1159
1160 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1161 {
1162- u64 result;
1163- unsigned long tmp;
1164+ u64 result, tmp;
1165
1166 smp_mb();
1167
1168 __asm__ __volatile__("@ atomic64_sub_return\n"
1169-"1: ldrexd %0, %H0, [%3]\n"
1170-" subs %0, %0, %4\n"
1171-" sbc %H0, %H0, %H4\n"
1172+"1: ldrexd %1, %H1, [%3]\n"
1173+" subs %0, %1, %4\n"
1174+" sbc %H0, %H1, %H4\n"
1175+
1176+#ifdef CONFIG_PAX_REFCOUNT
1177+" bvc 3f\n"
1178+" mov %0, %1\n"
1179+" mov %H0, %H1\n"
1180+"2: bkpt 0xf103\n"
1181+"3:\n"
1182+#endif
1183+
1184 " strexd %1, %0, %H0, [%3]\n"
1185 " teq %1, #0\n"
1186 " bne 1b"
1187+
1188+#ifdef CONFIG_PAX_REFCOUNT
1189+"\n4:\n"
1190+ _ASM_EXTABLE(2b, 4b)
1191+#endif
1192+
1193 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1194 : "r" (&v->counter), "r" (i)
1195 : "cc");
1196@@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1197 return oldval;
1198 }
1199
1200+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1201+{
1202+ u64 oldval;
1203+ unsigned long res;
1204+
1205+ smp_mb();
1206+
1207+ do {
1208+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1209+ "ldrexd %1, %H1, [%3]\n"
1210+ "mov %0, #0\n"
1211+ "teq %1, %4\n"
1212+ "teqeq %H1, %H4\n"
1213+ "strexdeq %0, %5, %H5, [%3]"
1214+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1215+ : "r" (&ptr->counter), "r" (old), "r" (new)
1216+ : "cc");
1217+ } while (res);
1218+
1219+ smp_mb();
1220+
1221+ return oldval;
1222+}
1223+
1224 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1225 {
1226 u64 result;
1227@@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1228
1229 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1230 {
1231- u64 result;
1232- unsigned long tmp;
1233+ u64 result, tmp;
1234
1235 smp_mb();
1236
1237 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1238-"1: ldrexd %0, %H0, [%3]\n"
1239-" subs %0, %0, #1\n"
1240-" sbc %H0, %H0, #0\n"
1241+"1: ldrexd %1, %H1, [%3]\n"
1242+" subs %0, %1, #1\n"
1243+" sbc %H0, %H1, #0\n"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+" bvc 3f\n"
1247+" mov %0, %1\n"
1248+" mov %H0, %H1\n"
1249+"2: bkpt 0xf103\n"
1250+"3:\n"
1251+#endif
1252+
1253 " teq %H0, #0\n"
1254-" bmi 2f\n"
1255+" bmi 4f\n"
1256 " strexd %1, %0, %H0, [%3]\n"
1257 " teq %1, #0\n"
1258 " bne 1b\n"
1259-"2:"
1260+"4:\n"
1261+
1262+#ifdef CONFIG_PAX_REFCOUNT
1263+ _ASM_EXTABLE(2b, 4b)
1264+#endif
1265+
1266 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1267 : "r" (&v->counter)
1268 : "cc");
1269@@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1270 " teq %0, %5\n"
1271 " teqeq %H0, %H5\n"
1272 " moveq %1, #0\n"
1273-" beq 2f\n"
1274+" beq 4f\n"
1275 " adds %0, %0, %6\n"
1276 " adc %H0, %H0, %H6\n"
1277+
1278+#ifdef CONFIG_PAX_REFCOUNT
1279+" bvc 3f\n"
1280+"2: bkpt 0xf103\n"
1281+"3:\n"
1282+#endif
1283+
1284 " strexd %2, %0, %H0, [%4]\n"
1285 " teq %2, #0\n"
1286 " bne 1b\n"
1287-"2:"
1288+"4:\n"
1289+
1290+#ifdef CONFIG_PAX_REFCOUNT
1291+ _ASM_EXTABLE(2b, 4b)
1292+#endif
1293+
1294 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1295 : "r" (&v->counter), "r" (u), "r" (a)
1296 : "cc");
1297@@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1298
1299 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1300 #define atomic64_inc(v) atomic64_add(1LL, (v))
1301+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1302 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1303+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1304 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1305 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1306 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1307+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1308 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1309 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1310 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1311diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1312index 75fe66b..2255c86 100644
1313--- a/arch/arm/include/asm/cache.h
1314+++ b/arch/arm/include/asm/cache.h
1315@@ -4,8 +4,10 @@
1316 #ifndef __ASMARM_CACHE_H
1317 #define __ASMARM_CACHE_H
1318
1319+#include <linux/const.h>
1320+
1321 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1322-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1323+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1324
1325 /*
1326 * Memory returned by kmalloc() may be used for DMA, so we must make
1327diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1328index d5d8d5c..ad92c96 100644
1329--- a/arch/arm/include/asm/cacheflush.h
1330+++ b/arch/arm/include/asm/cacheflush.h
1331@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1332 void (*dma_unmap_area)(const void *, size_t, int);
1333
1334 void (*dma_flush_range)(const void *, const void *);
1335-};
1336+} __no_const;
1337
1338 /*
1339 * Select the calling method
1340diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1341index 0e9ce8d..6ef1e03 100644
1342--- a/arch/arm/include/asm/elf.h
1343+++ b/arch/arm/include/asm/elf.h
1344@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1345 the loader. We need to make sure that it is out of the way of the program
1346 that it will "exec", and that there is sufficient room for the brk. */
1347
1348-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1349+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1350+
1351+#ifdef CONFIG_PAX_ASLR
1352+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1353+
1354+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1355+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1356+#endif
1357
1358 /* When the program starts, a1 contains a pointer to a function to be
1359 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1360@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1361 extern void elf_set_personality(const struct elf32_hdr *);
1362 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1363
1364-struct mm_struct;
1365-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1366-#define arch_randomize_brk arch_randomize_brk
1367-
1368 extern int vectors_user_mapping(void);
1369 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1370 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1371diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1372index e51b1e8..32a3113 100644
1373--- a/arch/arm/include/asm/kmap_types.h
1374+++ b/arch/arm/include/asm/kmap_types.h
1375@@ -21,6 +21,7 @@ enum km_type {
1376 KM_L1_CACHE,
1377 KM_L2_CACHE,
1378 KM_KDB,
1379+ KM_CLEARPAGE,
1380 KM_TYPE_NR
1381 };
1382
1383diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1384index 53426c6..c7baff3 100644
1385--- a/arch/arm/include/asm/outercache.h
1386+++ b/arch/arm/include/asm/outercache.h
1387@@ -35,7 +35,7 @@ struct outer_cache_fns {
1388 #endif
1389 void (*set_debug)(unsigned long);
1390 void (*resume)(void);
1391-};
1392+} __no_const;
1393
1394 #ifdef CONFIG_OUTER_CACHE
1395
1396diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1397index 97b440c..b7ff179 100644
1398--- a/arch/arm/include/asm/page.h
1399+++ b/arch/arm/include/asm/page.h
1400@@ -123,7 +123,7 @@ struct cpu_user_fns {
1401 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1402 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1403 unsigned long vaddr, struct vm_area_struct *vma);
1404-};
1405+} __no_const;
1406
1407 #ifdef MULTI_USER
1408 extern struct cpu_user_fns cpu_user;
1409diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1410index 943504f..bf8d667 100644
1411--- a/arch/arm/include/asm/pgalloc.h
1412+++ b/arch/arm/include/asm/pgalloc.h
1413@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1414 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1415 }
1416
1417+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1418+{
1419+ pud_populate(mm, pud, pmd);
1420+}
1421+
1422 #else /* !CONFIG_ARM_LPAE */
1423
1424 /*
1425@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1426 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1427 #define pmd_free(mm, pmd) do { } while (0)
1428 #define pud_populate(mm,pmd,pte) BUG()
1429+#define pud_populate_kernel(mm,pmd,pte) BUG()
1430
1431 #endif /* CONFIG_ARM_LPAE */
1432
1433diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1434index e4c96cc..1145653 100644
1435--- a/arch/arm/include/asm/system.h
1436+++ b/arch/arm/include/asm/system.h
1437@@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1438
1439 #define xchg(ptr,x) \
1440 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1441+#define xchg_unchecked(ptr,x) \
1442+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1443
1444 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1445
1446@@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1447
1448 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1449
1450+#define _ASM_EXTABLE(from, to) \
1451+" .pushsection __ex_table,\"a\"\n"\
1452+" .align 3\n" \
1453+" .long " #from ", " #to"\n" \
1454+" .popsection"
1455+
1456+
1457 #endif /* __ASSEMBLY__ */
1458
1459 #define arch_align_stack(x) (x)
1460diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1461index d4c24d4..4ac53e8 100644
1462--- a/arch/arm/include/asm/thread_info.h
1463+++ b/arch/arm/include/asm/thread_info.h
1464@@ -141,6 +141,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
1465 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1466 #define TIF_SYSCALL_TRACE 8
1467 #define TIF_SYSCALL_AUDIT 9
1468+
1469+/* within 8 bits of TIF_SYSCALL_TRACE
1470+ to meet flexible second operand requirements
1471+*/
1472+#define TIF_GRSEC_SETXID 10
1473+
1474 #define TIF_POLLING_NRFLAG 16
1475 #define TIF_USING_IWMMXT 17
1476 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1477@@ -156,9 +162,11 @@ extern void vfp_flush_hwstate(struct thread_info *);
1478 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1479 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1480 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1481+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1482
1483 /* Checks for any syscall work in entry-common.S */
1484-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1485+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1486+ _TIF_GRSEC_SETXID)
1487
1488 /*
1489 * Change these and you break ASM code in entry-common.S
1490diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1491index 2958976..12ccac4 100644
1492--- a/arch/arm/include/asm/uaccess.h
1493+++ b/arch/arm/include/asm/uaccess.h
1494@@ -22,6 +22,8 @@
1495 #define VERIFY_READ 0
1496 #define VERIFY_WRITE 1
1497
1498+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1499+
1500 /*
1501 * The exception table consists of pairs of addresses: the first is the
1502 * address of an instruction that is allowed to fault, and the second is
1503@@ -387,8 +389,23 @@ do { \
1504
1505
1506 #ifdef CONFIG_MMU
1507-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1508-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1509+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1510+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1511+
1512+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1513+{
1514+ if (!__builtin_constant_p(n))
1515+ check_object_size(to, n, false);
1516+ return ___copy_from_user(to, from, n);
1517+}
1518+
1519+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1520+{
1521+ if (!__builtin_constant_p(n))
1522+ check_object_size(from, n, true);
1523+ return ___copy_to_user(to, from, n);
1524+}
1525+
1526 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1527 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1528 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1529@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1530
1531 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1532 {
1533+ if ((long)n < 0)
1534+ return n;
1535+
1536 if (access_ok(VERIFY_READ, from, n))
1537 n = __copy_from_user(to, from, n);
1538 else /* security hole - plug it */
1539@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void __u
1540
1541 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1542 {
1543+ if ((long)n < 0)
1544+ return n;
1545+
1546 if (access_ok(VERIFY_WRITE, to, n))
1547 n = __copy_to_user(to, from, n);
1548 return n;
1549diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1550index 5b0bce6..becd81c 100644
1551--- a/arch/arm/kernel/armksyms.c
1552+++ b/arch/arm/kernel/armksyms.c
1553@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1554 #ifdef CONFIG_MMU
1555 EXPORT_SYMBOL(copy_page);
1556
1557-EXPORT_SYMBOL(__copy_from_user);
1558-EXPORT_SYMBOL(__copy_to_user);
1559+EXPORT_SYMBOL(___copy_from_user);
1560+EXPORT_SYMBOL(___copy_to_user);
1561 EXPORT_SYMBOL(__clear_user);
1562
1563 EXPORT_SYMBOL(__get_user_1);
1564diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1565index 971d65c..cc936fb 100644
1566--- a/arch/arm/kernel/process.c
1567+++ b/arch/arm/kernel/process.c
1568@@ -28,7 +28,6 @@
1569 #include <linux/tick.h>
1570 #include <linux/utsname.h>
1571 #include <linux/uaccess.h>
1572-#include <linux/random.h>
1573 #include <linux/hw_breakpoint.h>
1574 #include <linux/cpuidle.h>
1575
1576@@ -273,9 +272,10 @@ void machine_power_off(void)
1577 machine_shutdown();
1578 if (pm_power_off)
1579 pm_power_off();
1580+ BUG();
1581 }
1582
1583-void machine_restart(char *cmd)
1584+__noreturn void machine_restart(char *cmd)
1585 {
1586 machine_shutdown();
1587
1588@@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1589 return 0;
1590 }
1591
1592-unsigned long arch_randomize_brk(struct mm_struct *mm)
1593-{
1594- unsigned long range_end = mm->brk + 0x02000000;
1595- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1596-}
1597-
1598 #ifdef CONFIG_MMU
1599 /*
1600 * The vectors page is always readable from user space for the
1601diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1602index f5ce8ab..4b73893 100644
1603--- a/arch/arm/kernel/ptrace.c
1604+++ b/arch/arm/kernel/ptrace.c
1605@@ -905,10 +905,19 @@ long arch_ptrace(struct task_struct *child, long request,
1606 return ret;
1607 }
1608
1609+#ifdef CONFIG_GRKERNSEC_SETXID
1610+extern void gr_delayed_cred_worker(void);
1611+#endif
1612+
1613 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1614 {
1615 unsigned long ip;
1616
1617+#ifdef CONFIG_GRKERNSEC_SETXID
1618+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1619+ gr_delayed_cred_worker();
1620+#endif
1621+
1622 if (why)
1623 audit_syscall_exit(regs);
1624 else
1625diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1626index a255c39..4a19b25 100644
1627--- a/arch/arm/kernel/setup.c
1628+++ b/arch/arm/kernel/setup.c
1629@@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1630 struct cpu_tlb_fns cpu_tlb __read_mostly;
1631 #endif
1632 #ifdef MULTI_USER
1633-struct cpu_user_fns cpu_user __read_mostly;
1634+struct cpu_user_fns cpu_user __read_only;
1635 #endif
1636 #ifdef MULTI_CACHE
1637-struct cpu_cache_fns cpu_cache __read_mostly;
1638+struct cpu_cache_fns cpu_cache __read_only;
1639 #endif
1640 #ifdef CONFIG_OUTER_CACHE
1641-struct outer_cache_fns outer_cache __read_mostly;
1642+struct outer_cache_fns outer_cache __read_only;
1643 EXPORT_SYMBOL(outer_cache);
1644 #endif
1645
1646diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1647index f84dfe6..13e94f7 100644
1648--- a/arch/arm/kernel/traps.c
1649+++ b/arch/arm/kernel/traps.c
1650@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1651
1652 static DEFINE_RAW_SPINLOCK(die_lock);
1653
1654+extern void gr_handle_kernel_exploit(void);
1655+
1656 /*
1657 * This function is protected against re-entrancy.
1658 */
1659@@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1660 panic("Fatal exception in interrupt");
1661 if (panic_on_oops)
1662 panic("Fatal exception");
1663+
1664+ gr_handle_kernel_exploit();
1665+
1666 if (ret != NOTIFY_STOP)
1667 do_exit(SIGSEGV);
1668 }
1669diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1670index 66a477a..bee61d3 100644
1671--- a/arch/arm/lib/copy_from_user.S
1672+++ b/arch/arm/lib/copy_from_user.S
1673@@ -16,7 +16,7 @@
1674 /*
1675 * Prototype:
1676 *
1677- * size_t __copy_from_user(void *to, const void *from, size_t n)
1678+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1679 *
1680 * Purpose:
1681 *
1682@@ -84,11 +84,11 @@
1683
1684 .text
1685
1686-ENTRY(__copy_from_user)
1687+ENTRY(___copy_from_user)
1688
1689 #include "copy_template.S"
1690
1691-ENDPROC(__copy_from_user)
1692+ENDPROC(___copy_from_user)
1693
1694 .pushsection .fixup,"ax"
1695 .align 0
1696diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1697index 6ee2f67..d1cce76 100644
1698--- a/arch/arm/lib/copy_page.S
1699+++ b/arch/arm/lib/copy_page.S
1700@@ -10,6 +10,7 @@
1701 * ASM optimised string functions
1702 */
1703 #include <linux/linkage.h>
1704+#include <linux/const.h>
1705 #include <asm/assembler.h>
1706 #include <asm/asm-offsets.h>
1707 #include <asm/cache.h>
1708diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1709index d066df6..df28194 100644
1710--- a/arch/arm/lib/copy_to_user.S
1711+++ b/arch/arm/lib/copy_to_user.S
1712@@ -16,7 +16,7 @@
1713 /*
1714 * Prototype:
1715 *
1716- * size_t __copy_to_user(void *to, const void *from, size_t n)
1717+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1718 *
1719 * Purpose:
1720 *
1721@@ -88,11 +88,11 @@
1722 .text
1723
1724 ENTRY(__copy_to_user_std)
1725-WEAK(__copy_to_user)
1726+WEAK(___copy_to_user)
1727
1728 #include "copy_template.S"
1729
1730-ENDPROC(__copy_to_user)
1731+ENDPROC(___copy_to_user)
1732 ENDPROC(__copy_to_user_std)
1733
1734 .pushsection .fixup,"ax"
1735diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1736index 5c908b1..e712687 100644
1737--- a/arch/arm/lib/uaccess.S
1738+++ b/arch/arm/lib/uaccess.S
1739@@ -20,7 +20,7 @@
1740
1741 #define PAGE_SHIFT 12
1742
1743-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1744+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1745 * Purpose : copy a block to user memory from kernel memory
1746 * Params : to - user memory
1747 * : from - kernel memory
1748@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1749 sub r2, r2, ip
1750 b .Lc2u_dest_aligned
1751
1752-ENTRY(__copy_to_user)
1753+ENTRY(___copy_to_user)
1754 stmfd sp!, {r2, r4 - r7, lr}
1755 cmp r2, #4
1756 blt .Lc2u_not_enough
1757@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1758 ldrgtb r3, [r1], #0
1759 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1760 b .Lc2u_finished
1761-ENDPROC(__copy_to_user)
1762+ENDPROC(___copy_to_user)
1763
1764 .pushsection .fixup,"ax"
1765 .align 0
1766 9001: ldmfd sp!, {r0, r4 - r7, pc}
1767 .popsection
1768
1769-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1770+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1771 * Purpose : copy a block from user memory to kernel memory
1772 * Params : to - kernel memory
1773 * : from - user memory
1774@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1775 sub r2, r2, ip
1776 b .Lcfu_dest_aligned
1777
1778-ENTRY(__copy_from_user)
1779+ENTRY(___copy_from_user)
1780 stmfd sp!, {r0, r2, r4 - r7, lr}
1781 cmp r2, #4
1782 blt .Lcfu_not_enough
1783@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1784 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1785 strgtb r3, [r0], #1
1786 b .Lcfu_finished
1787-ENDPROC(__copy_from_user)
1788+ENDPROC(___copy_from_user)
1789
1790 .pushsection .fixup,"ax"
1791 .align 0
1792diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1793index 025f742..8432b08 100644
1794--- a/arch/arm/lib/uaccess_with_memcpy.c
1795+++ b/arch/arm/lib/uaccess_with_memcpy.c
1796@@ -104,7 +104,7 @@ out:
1797 }
1798
1799 unsigned long
1800-__copy_to_user(void __user *to, const void *from, unsigned long n)
1801+___copy_to_user(void __user *to, const void *from, unsigned long n)
1802 {
1803 /*
1804 * This test is stubbed out of the main function above to keep
1805diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1806index 6722627..8f97548c 100644
1807--- a/arch/arm/mach-omap2/board-n8x0.c
1808+++ b/arch/arm/mach-omap2/board-n8x0.c
1809@@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1810 }
1811 #endif
1812
1813-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1814+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1815 .late_init = n8x0_menelaus_late_init,
1816 };
1817
1818diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1819index 2b2d51c..0127490 100644
1820--- a/arch/arm/mach-ux500/mbox-db5500.c
1821+++ b/arch/arm/mach-ux500/mbox-db5500.c
1822@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1823 return sprintf(buf, "0x%X\n", mbox_value);
1824 }
1825
1826-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1827+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1828
1829 static int mbox_show(struct seq_file *s, void *data)
1830 {
1831diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1832index bb7eac3..3bade16 100644
1833--- a/arch/arm/mm/fault.c
1834+++ b/arch/arm/mm/fault.c
1835@@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1836 }
1837 #endif
1838
1839+#ifdef CONFIG_PAX_PAGEEXEC
1840+ if (fsr & FSR_LNX_PF) {
1841+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1842+ do_group_exit(SIGKILL);
1843+ }
1844+#endif
1845+
1846 tsk->thread.address = addr;
1847 tsk->thread.error_code = fsr;
1848 tsk->thread.trap_no = 14;
1849@@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1850 }
1851 #endif /* CONFIG_MMU */
1852
1853+#ifdef CONFIG_PAX_PAGEEXEC
1854+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1855+{
1856+ long i;
1857+
1858+ printk(KERN_ERR "PAX: bytes at PC: ");
1859+ for (i = 0; i < 20; i++) {
1860+ unsigned char c;
1861+ if (get_user(c, (__force unsigned char __user *)pc+i))
1862+ printk(KERN_CONT "?? ");
1863+ else
1864+ printk(KERN_CONT "%02x ", c);
1865+ }
1866+ printk("\n");
1867+
1868+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1869+ for (i = -1; i < 20; i++) {
1870+ unsigned long c;
1871+ if (get_user(c, (__force unsigned long __user *)sp+i))
1872+ printk(KERN_CONT "???????? ");
1873+ else
1874+ printk(KERN_CONT "%08lx ", c);
1875+ }
1876+ printk("\n");
1877+}
1878+#endif
1879+
1880 /*
1881 * First Level Translation Fault Handler
1882 *
1883@@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1884 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1885 struct siginfo info;
1886
1887+#ifdef CONFIG_PAX_REFCOUNT
1888+ if (fsr_fs(ifsr) == 2) {
1889+ unsigned int bkpt;
1890+
1891+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1892+ current->thread.error_code = ifsr;
1893+ current->thread.trap_no = 0;
1894+ pax_report_refcount_overflow(regs);
1895+ fixup_exception(regs);
1896+ return;
1897+ }
1898+ }
1899+#endif
1900+
1901 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1902 return;
1903
1904diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1905index ce8cb19..3ec539d 100644
1906--- a/arch/arm/mm/mmap.c
1907+++ b/arch/arm/mm/mmap.c
1908@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1909 if (len > TASK_SIZE)
1910 return -ENOMEM;
1911
1912+#ifdef CONFIG_PAX_RANDMMAP
1913+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1914+#endif
1915+
1916 if (addr) {
1917 if (do_align)
1918 addr = COLOUR_ALIGN(addr, pgoff);
1919@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1920 addr = PAGE_ALIGN(addr);
1921
1922 vma = find_vma(mm, addr);
1923- if (TASK_SIZE - len >= addr &&
1924- (!vma || addr + len <= vma->vm_start))
1925+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1926 return addr;
1927 }
1928 if (len > mm->cached_hole_size) {
1929- start_addr = addr = mm->free_area_cache;
1930+ start_addr = addr = mm->free_area_cache;
1931 } else {
1932- start_addr = addr = mm->mmap_base;
1933- mm->cached_hole_size = 0;
1934+ start_addr = addr = mm->mmap_base;
1935+ mm->cached_hole_size = 0;
1936 }
1937
1938 full_search:
1939@@ -124,14 +127,14 @@ full_search:
1940 * Start a new search - just in case we missed
1941 * some holes.
1942 */
1943- if (start_addr != TASK_UNMAPPED_BASE) {
1944- start_addr = addr = TASK_UNMAPPED_BASE;
1945+ if (start_addr != mm->mmap_base) {
1946+ start_addr = addr = mm->mmap_base;
1947 mm->cached_hole_size = 0;
1948 goto full_search;
1949 }
1950 return -ENOMEM;
1951 }
1952- if (!vma || addr + len <= vma->vm_start) {
1953+ if (check_heap_stack_gap(vma, addr, len)) {
1954 /*
1955 * Remember the place where we stopped the search:
1956 */
1957@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1958
1959 if (mmap_is_legacy()) {
1960 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1961+
1962+#ifdef CONFIG_PAX_RANDMMAP
1963+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1964+ mm->mmap_base += mm->delta_mmap;
1965+#endif
1966+
1967 mm->get_unmapped_area = arch_get_unmapped_area;
1968 mm->unmap_area = arch_unmap_area;
1969 } else {
1970 mm->mmap_base = mmap_base(random_factor);
1971+
1972+#ifdef CONFIG_PAX_RANDMMAP
1973+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1974+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1975+#endif
1976+
1977 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1978 mm->unmap_area = arch_unmap_area_topdown;
1979 }
1980diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1981index 71a6827..e7fbc23 100644
1982--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1983+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1984@@ -43,7 +43,7 @@ struct samsung_dma_ops {
1985 int (*started)(unsigned ch);
1986 int (*flush)(unsigned ch);
1987 int (*stop)(unsigned ch);
1988-};
1989+} __no_const;
1990
1991 extern void *samsung_dmadev_get_ops(void);
1992 extern void *s3c_dma_get_ops(void);
1993diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1994index 5f28cae..3d23723 100644
1995--- a/arch/arm/plat-samsung/include/plat/ehci.h
1996+++ b/arch/arm/plat-samsung/include/plat/ehci.h
1997@@ -14,7 +14,7 @@
1998 struct s5p_ehci_platdata {
1999 int (*phy_init)(struct platform_device *pdev, int type);
2000 int (*phy_exit)(struct platform_device *pdev, int type);
2001-};
2002+} __no_const;
2003
2004 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2005
2006diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2007index c3a58a1..78fbf54 100644
2008--- a/arch/avr32/include/asm/cache.h
2009+++ b/arch/avr32/include/asm/cache.h
2010@@ -1,8 +1,10 @@
2011 #ifndef __ASM_AVR32_CACHE_H
2012 #define __ASM_AVR32_CACHE_H
2013
2014+#include <linux/const.h>
2015+
2016 #define L1_CACHE_SHIFT 5
2017-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2018+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2019
2020 /*
2021 * Memory returned by kmalloc() may be used for DMA, so we must make
2022diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2023index 3b3159b..425ea94 100644
2024--- a/arch/avr32/include/asm/elf.h
2025+++ b/arch/avr32/include/asm/elf.h
2026@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2027 the loader. We need to make sure that it is out of the way of the program
2028 that it will "exec", and that there is sufficient room for the brk. */
2029
2030-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2031+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2032
2033+#ifdef CONFIG_PAX_ASLR
2034+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2035+
2036+#define PAX_DELTA_MMAP_LEN 15
2037+#define PAX_DELTA_STACK_LEN 15
2038+#endif
2039
2040 /* This yields a mask that user programs can use to figure out what
2041 instruction set this CPU supports. This could be done in user space,
2042diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2043index b7f5c68..556135c 100644
2044--- a/arch/avr32/include/asm/kmap_types.h
2045+++ b/arch/avr32/include/asm/kmap_types.h
2046@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2047 D(11) KM_IRQ1,
2048 D(12) KM_SOFTIRQ0,
2049 D(13) KM_SOFTIRQ1,
2050-D(14) KM_TYPE_NR
2051+D(14) KM_CLEARPAGE,
2052+D(15) KM_TYPE_NR
2053 };
2054
2055 #undef D
2056diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2057index f7040a1..db9f300 100644
2058--- a/arch/avr32/mm/fault.c
2059+++ b/arch/avr32/mm/fault.c
2060@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2061
2062 int exception_trace = 1;
2063
2064+#ifdef CONFIG_PAX_PAGEEXEC
2065+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2066+{
2067+ unsigned long i;
2068+
2069+ printk(KERN_ERR "PAX: bytes at PC: ");
2070+ for (i = 0; i < 20; i++) {
2071+ unsigned char c;
2072+ if (get_user(c, (unsigned char *)pc+i))
2073+ printk(KERN_CONT "???????? ");
2074+ else
2075+ printk(KERN_CONT "%02x ", c);
2076+ }
2077+ printk("\n");
2078+}
2079+#endif
2080+
2081 /*
2082 * This routine handles page faults. It determines the address and the
2083 * problem, and then passes it off to one of the appropriate routines.
2084@@ -156,6 +173,16 @@ bad_area:
2085 up_read(&mm->mmap_sem);
2086
2087 if (user_mode(regs)) {
2088+
2089+#ifdef CONFIG_PAX_PAGEEXEC
2090+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2091+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2092+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2093+ do_group_exit(SIGKILL);
2094+ }
2095+ }
2096+#endif
2097+
2098 if (exception_trace && printk_ratelimit())
2099 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2100 "sp %08lx ecr %lu\n",
2101diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2102index 568885a..f8008df 100644
2103--- a/arch/blackfin/include/asm/cache.h
2104+++ b/arch/blackfin/include/asm/cache.h
2105@@ -7,6 +7,7 @@
2106 #ifndef __ARCH_BLACKFIN_CACHE_H
2107 #define __ARCH_BLACKFIN_CACHE_H
2108
2109+#include <linux/const.h>
2110 #include <linux/linkage.h> /* for asmlinkage */
2111
2112 /*
2113@@ -14,7 +15,7 @@
2114 * Blackfin loads 32 bytes for cache
2115 */
2116 #define L1_CACHE_SHIFT 5
2117-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2118+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2119 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2120
2121 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2122diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2123index aea2718..3639a60 100644
2124--- a/arch/cris/include/arch-v10/arch/cache.h
2125+++ b/arch/cris/include/arch-v10/arch/cache.h
2126@@ -1,8 +1,9 @@
2127 #ifndef _ASM_ARCH_CACHE_H
2128 #define _ASM_ARCH_CACHE_H
2129
2130+#include <linux/const.h>
2131 /* Etrax 100LX have 32-byte cache-lines. */
2132-#define L1_CACHE_BYTES 32
2133 #define L1_CACHE_SHIFT 5
2134+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2135
2136 #endif /* _ASM_ARCH_CACHE_H */
2137diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2138index 1de779f..336fad3 100644
2139--- a/arch/cris/include/arch-v32/arch/cache.h
2140+++ b/arch/cris/include/arch-v32/arch/cache.h
2141@@ -1,11 +1,12 @@
2142 #ifndef _ASM_CRIS_ARCH_CACHE_H
2143 #define _ASM_CRIS_ARCH_CACHE_H
2144
2145+#include <linux/const.h>
2146 #include <arch/hwregs/dma.h>
2147
2148 /* A cache-line is 32 bytes. */
2149-#define L1_CACHE_BYTES 32
2150 #define L1_CACHE_SHIFT 5
2151+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2152
2153 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2154
2155diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2156index 0d8a7d6..d0c9ff5 100644
2157--- a/arch/frv/include/asm/atomic.h
2158+++ b/arch/frv/include/asm/atomic.h
2159@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2160 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2161 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2162
2163+#define atomic64_read_unchecked(v) atomic64_read(v)
2164+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2165+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2166+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2167+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2168+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2169+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2170+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2171+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2172+
2173 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2174 {
2175 int c, old;
2176diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2177index 2797163..c2a401d 100644
2178--- a/arch/frv/include/asm/cache.h
2179+++ b/arch/frv/include/asm/cache.h
2180@@ -12,10 +12,11 @@
2181 #ifndef __ASM_CACHE_H
2182 #define __ASM_CACHE_H
2183
2184+#include <linux/const.h>
2185
2186 /* bytes per L1 cache line */
2187 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2188-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2189+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2190
2191 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2192 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2193diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2194index f8e16b2..c73ff79 100644
2195--- a/arch/frv/include/asm/kmap_types.h
2196+++ b/arch/frv/include/asm/kmap_types.h
2197@@ -23,6 +23,7 @@ enum km_type {
2198 KM_IRQ1,
2199 KM_SOFTIRQ0,
2200 KM_SOFTIRQ1,
2201+ KM_CLEARPAGE,
2202 KM_TYPE_NR
2203 };
2204
2205diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2206index 385fd30..6c3d97e 100644
2207--- a/arch/frv/mm/elf-fdpic.c
2208+++ b/arch/frv/mm/elf-fdpic.c
2209@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2210 if (addr) {
2211 addr = PAGE_ALIGN(addr);
2212 vma = find_vma(current->mm, addr);
2213- if (TASK_SIZE - len >= addr &&
2214- (!vma || addr + len <= vma->vm_start))
2215+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2216 goto success;
2217 }
2218
2219@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2220 for (; vma; vma = vma->vm_next) {
2221 if (addr > limit)
2222 break;
2223- if (addr + len <= vma->vm_start)
2224+ if (check_heap_stack_gap(vma, addr, len))
2225 goto success;
2226 addr = vma->vm_end;
2227 }
2228@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2229 for (; vma; vma = vma->vm_next) {
2230 if (addr > limit)
2231 break;
2232- if (addr + len <= vma->vm_start)
2233+ if (check_heap_stack_gap(vma, addr, len))
2234 goto success;
2235 addr = vma->vm_end;
2236 }
2237diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2238index c635028..6d9445a 100644
2239--- a/arch/h8300/include/asm/cache.h
2240+++ b/arch/h8300/include/asm/cache.h
2241@@ -1,8 +1,10 @@
2242 #ifndef __ARCH_H8300_CACHE_H
2243 #define __ARCH_H8300_CACHE_H
2244
2245+#include <linux/const.h>
2246+
2247 /* bytes per L1 cache line */
2248-#define L1_CACHE_BYTES 4
2249+#define L1_CACHE_BYTES _AC(4,UL)
2250
2251 /* m68k-elf-gcc 2.95.2 doesn't like these */
2252
2253diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2254index 0f01de2..d37d309 100644
2255--- a/arch/hexagon/include/asm/cache.h
2256+++ b/arch/hexagon/include/asm/cache.h
2257@@ -21,9 +21,11 @@
2258 #ifndef __ASM_CACHE_H
2259 #define __ASM_CACHE_H
2260
2261+#include <linux/const.h>
2262+
2263 /* Bytes per L1 cache line */
2264-#define L1_CACHE_SHIFT (5)
2265-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2266+#define L1_CACHE_SHIFT 5
2267+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2268
2269 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2270 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2271diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2272index 3fad89e..3047da5 100644
2273--- a/arch/ia64/include/asm/atomic.h
2274+++ b/arch/ia64/include/asm/atomic.h
2275@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2276 #define atomic64_inc(v) atomic64_add(1, (v))
2277 #define atomic64_dec(v) atomic64_sub(1, (v))
2278
2279+#define atomic64_read_unchecked(v) atomic64_read(v)
2280+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2281+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2282+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2283+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2284+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2285+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2286+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2287+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2288+
2289 /* Atomic operations are already serializing */
2290 #define smp_mb__before_atomic_dec() barrier()
2291 #define smp_mb__after_atomic_dec() barrier()
2292diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2293index 988254a..e1ee885 100644
2294--- a/arch/ia64/include/asm/cache.h
2295+++ b/arch/ia64/include/asm/cache.h
2296@@ -1,6 +1,7 @@
2297 #ifndef _ASM_IA64_CACHE_H
2298 #define _ASM_IA64_CACHE_H
2299
2300+#include <linux/const.h>
2301
2302 /*
2303 * Copyright (C) 1998-2000 Hewlett-Packard Co
2304@@ -9,7 +10,7 @@
2305
2306 /* Bytes per L1 (data) cache line. */
2307 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2308-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2309+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2310
2311 #ifdef CONFIG_SMP
2312 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2313diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2314index b5298eb..67c6e62 100644
2315--- a/arch/ia64/include/asm/elf.h
2316+++ b/arch/ia64/include/asm/elf.h
2317@@ -42,6 +42,13 @@
2318 */
2319 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2320
2321+#ifdef CONFIG_PAX_ASLR
2322+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2323+
2324+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2325+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2326+#endif
2327+
2328 #define PT_IA_64_UNWIND 0x70000001
2329
2330 /* IA-64 relocations: */
2331diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2332index 96a8d92..617a1cf 100644
2333--- a/arch/ia64/include/asm/pgalloc.h
2334+++ b/arch/ia64/include/asm/pgalloc.h
2335@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2336 pgd_val(*pgd_entry) = __pa(pud);
2337 }
2338
2339+static inline void
2340+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2341+{
2342+ pgd_populate(mm, pgd_entry, pud);
2343+}
2344+
2345 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2346 {
2347 return quicklist_alloc(0, GFP_KERNEL, NULL);
2348@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2349 pud_val(*pud_entry) = __pa(pmd);
2350 }
2351
2352+static inline void
2353+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2354+{
2355+ pud_populate(mm, pud_entry, pmd);
2356+}
2357+
2358 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2359 {
2360 return quicklist_alloc(0, GFP_KERNEL, NULL);
2361diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2362index 1a97af3..7529d31 100644
2363--- a/arch/ia64/include/asm/pgtable.h
2364+++ b/arch/ia64/include/asm/pgtable.h
2365@@ -12,7 +12,7 @@
2366 * David Mosberger-Tang <davidm@hpl.hp.com>
2367 */
2368
2369-
2370+#include <linux/const.h>
2371 #include <asm/mman.h>
2372 #include <asm/page.h>
2373 #include <asm/processor.h>
2374@@ -143,6 +143,17 @@
2375 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2376 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2377 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2378+
2379+#ifdef CONFIG_PAX_PAGEEXEC
2380+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2381+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2382+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2383+#else
2384+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2385+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2386+# define PAGE_COPY_NOEXEC PAGE_COPY
2387+#endif
2388+
2389 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2390 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2391 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2392diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2393index b77768d..e0795eb 100644
2394--- a/arch/ia64/include/asm/spinlock.h
2395+++ b/arch/ia64/include/asm/spinlock.h
2396@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2397 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2398
2399 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2400- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2401+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2402 }
2403
2404 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2405diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2406index 449c8c0..432a3d2 100644
2407--- a/arch/ia64/include/asm/uaccess.h
2408+++ b/arch/ia64/include/asm/uaccess.h
2409@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2410 const void *__cu_from = (from); \
2411 long __cu_len = (n); \
2412 \
2413- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2414+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2415 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2416 __cu_len; \
2417 })
2418@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2419 long __cu_len = (n); \
2420 \
2421 __chk_user_ptr(__cu_from); \
2422- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2423+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2424 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2425 __cu_len; \
2426 })
2427diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2428index 24603be..948052d 100644
2429--- a/arch/ia64/kernel/module.c
2430+++ b/arch/ia64/kernel/module.c
2431@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2432 void
2433 module_free (struct module *mod, void *module_region)
2434 {
2435- if (mod && mod->arch.init_unw_table &&
2436- module_region == mod->module_init) {
2437+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2438 unw_remove_unwind_table(mod->arch.init_unw_table);
2439 mod->arch.init_unw_table = NULL;
2440 }
2441@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2442 }
2443
2444 static inline int
2445+in_init_rx (const struct module *mod, uint64_t addr)
2446+{
2447+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2448+}
2449+
2450+static inline int
2451+in_init_rw (const struct module *mod, uint64_t addr)
2452+{
2453+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2454+}
2455+
2456+static inline int
2457 in_init (const struct module *mod, uint64_t addr)
2458 {
2459- return addr - (uint64_t) mod->module_init < mod->init_size;
2460+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2461+}
2462+
2463+static inline int
2464+in_core_rx (const struct module *mod, uint64_t addr)
2465+{
2466+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2467+}
2468+
2469+static inline int
2470+in_core_rw (const struct module *mod, uint64_t addr)
2471+{
2472+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2473 }
2474
2475 static inline int
2476 in_core (const struct module *mod, uint64_t addr)
2477 {
2478- return addr - (uint64_t) mod->module_core < mod->core_size;
2479+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2480 }
2481
2482 static inline int
2483@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2484 break;
2485
2486 case RV_BDREL:
2487- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2488+ if (in_init_rx(mod, val))
2489+ val -= (uint64_t) mod->module_init_rx;
2490+ else if (in_init_rw(mod, val))
2491+ val -= (uint64_t) mod->module_init_rw;
2492+ else if (in_core_rx(mod, val))
2493+ val -= (uint64_t) mod->module_core_rx;
2494+ else if (in_core_rw(mod, val))
2495+ val -= (uint64_t) mod->module_core_rw;
2496 break;
2497
2498 case RV_LTV:
2499@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2500 * addresses have been selected...
2501 */
2502 uint64_t gp;
2503- if (mod->core_size > MAX_LTOFF)
2504+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2505 /*
2506 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2507 * at the end of the module.
2508 */
2509- gp = mod->core_size - MAX_LTOFF / 2;
2510+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2511 else
2512- gp = mod->core_size / 2;
2513- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2514+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2515+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2516 mod->arch.gp = gp;
2517 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2518 }
2519diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2520index 609d500..7dde2a8 100644
2521--- a/arch/ia64/kernel/sys_ia64.c
2522+++ b/arch/ia64/kernel/sys_ia64.c
2523@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2524 if (REGION_NUMBER(addr) == RGN_HPAGE)
2525 addr = 0;
2526 #endif
2527+
2528+#ifdef CONFIG_PAX_RANDMMAP
2529+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2530+ addr = mm->free_area_cache;
2531+ else
2532+#endif
2533+
2534 if (!addr)
2535 addr = mm->free_area_cache;
2536
2537@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2538 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2539 /* At this point: (!vma || addr < vma->vm_end). */
2540 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2541- if (start_addr != TASK_UNMAPPED_BASE) {
2542+ if (start_addr != mm->mmap_base) {
2543 /* Start a new search --- just in case we missed some holes. */
2544- addr = TASK_UNMAPPED_BASE;
2545+ addr = mm->mmap_base;
2546 goto full_search;
2547 }
2548 return -ENOMEM;
2549 }
2550- if (!vma || addr + len <= vma->vm_start) {
2551+ if (check_heap_stack_gap(vma, addr, len)) {
2552 /* Remember the address where we stopped this search: */
2553 mm->free_area_cache = addr + len;
2554 return addr;
2555diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2556index 53c0ba0..2accdde 100644
2557--- a/arch/ia64/kernel/vmlinux.lds.S
2558+++ b/arch/ia64/kernel/vmlinux.lds.S
2559@@ -199,7 +199,7 @@ SECTIONS {
2560 /* Per-cpu data: */
2561 . = ALIGN(PERCPU_PAGE_SIZE);
2562 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2563- __phys_per_cpu_start = __per_cpu_load;
2564+ __phys_per_cpu_start = per_cpu_load;
2565 /*
2566 * ensure percpu data fits
2567 * into percpu page size
2568diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2569index 20b3593..1ce77f0 100644
2570--- a/arch/ia64/mm/fault.c
2571+++ b/arch/ia64/mm/fault.c
2572@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2573 return pte_present(pte);
2574 }
2575
2576+#ifdef CONFIG_PAX_PAGEEXEC
2577+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2578+{
2579+ unsigned long i;
2580+
2581+ printk(KERN_ERR "PAX: bytes at PC: ");
2582+ for (i = 0; i < 8; i++) {
2583+ unsigned int c;
2584+ if (get_user(c, (unsigned int *)pc+i))
2585+ printk(KERN_CONT "???????? ");
2586+ else
2587+ printk(KERN_CONT "%08x ", c);
2588+ }
2589+ printk("\n");
2590+}
2591+#endif
2592+
2593 void __kprobes
2594 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2595 {
2596@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2597 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2598 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2599
2600- if ((vma->vm_flags & mask) != mask)
2601+ if ((vma->vm_flags & mask) != mask) {
2602+
2603+#ifdef CONFIG_PAX_PAGEEXEC
2604+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2605+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2606+ goto bad_area;
2607+
2608+ up_read(&mm->mmap_sem);
2609+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2610+ do_group_exit(SIGKILL);
2611+ }
2612+#endif
2613+
2614 goto bad_area;
2615
2616+ }
2617+
2618 /*
2619 * If for any reason at all we couldn't handle the fault, make
2620 * sure we exit gracefully rather than endlessly redo the
2621diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2622index 5ca674b..e0e1b70 100644
2623--- a/arch/ia64/mm/hugetlbpage.c
2624+++ b/arch/ia64/mm/hugetlbpage.c
2625@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2626 /* At this point: (!vmm || addr < vmm->vm_end). */
2627 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2628 return -ENOMEM;
2629- if (!vmm || (addr + len) <= vmm->vm_start)
2630+ if (check_heap_stack_gap(vmm, addr, len))
2631 return addr;
2632 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2633 }
2634diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2635index 13df239d..cb52116 100644
2636--- a/arch/ia64/mm/init.c
2637+++ b/arch/ia64/mm/init.c
2638@@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2639 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2640 vma->vm_end = vma->vm_start + PAGE_SIZE;
2641 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2642+
2643+#ifdef CONFIG_PAX_PAGEEXEC
2644+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2645+ vma->vm_flags &= ~VM_EXEC;
2646+
2647+#ifdef CONFIG_PAX_MPROTECT
2648+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2649+ vma->vm_flags &= ~VM_MAYEXEC;
2650+#endif
2651+
2652+ }
2653+#endif
2654+
2655 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2656 down_write(&current->mm->mmap_sem);
2657 if (insert_vm_struct(current->mm, vma)) {
2658diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2659index 40b3ee9..8c2c112 100644
2660--- a/arch/m32r/include/asm/cache.h
2661+++ b/arch/m32r/include/asm/cache.h
2662@@ -1,8 +1,10 @@
2663 #ifndef _ASM_M32R_CACHE_H
2664 #define _ASM_M32R_CACHE_H
2665
2666+#include <linux/const.h>
2667+
2668 /* L1 cache line size */
2669 #define L1_CACHE_SHIFT 4
2670-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2671+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2672
2673 #endif /* _ASM_M32R_CACHE_H */
2674diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2675index 82abd15..d95ae5d 100644
2676--- a/arch/m32r/lib/usercopy.c
2677+++ b/arch/m32r/lib/usercopy.c
2678@@ -14,6 +14,9 @@
2679 unsigned long
2680 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2681 {
2682+ if ((long)n < 0)
2683+ return n;
2684+
2685 prefetch(from);
2686 if (access_ok(VERIFY_WRITE, to, n))
2687 __copy_user(to,from,n);
2688@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2689 unsigned long
2690 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2691 {
2692+ if ((long)n < 0)
2693+ return n;
2694+
2695 prefetchw(to);
2696 if (access_ok(VERIFY_READ, from, n))
2697 __copy_user_zeroing(to,from,n);
2698diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2699index 0395c51..5f26031 100644
2700--- a/arch/m68k/include/asm/cache.h
2701+++ b/arch/m68k/include/asm/cache.h
2702@@ -4,9 +4,11 @@
2703 #ifndef __ARCH_M68K_CACHE_H
2704 #define __ARCH_M68K_CACHE_H
2705
2706+#include <linux/const.h>
2707+
2708 /* bytes per L1 cache line */
2709 #define L1_CACHE_SHIFT 4
2710-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2711+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2712
2713 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2714
2715diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2716index 4efe96a..60e8699 100644
2717--- a/arch/microblaze/include/asm/cache.h
2718+++ b/arch/microblaze/include/asm/cache.h
2719@@ -13,11 +13,12 @@
2720 #ifndef _ASM_MICROBLAZE_CACHE_H
2721 #define _ASM_MICROBLAZE_CACHE_H
2722
2723+#include <linux/const.h>
2724 #include <asm/registers.h>
2725
2726 #define L1_CACHE_SHIFT 5
2727 /* word-granular cache in microblaze */
2728-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2729+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2730
2731 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2732
2733diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2734index 1d93f81..67794d0 100644
2735--- a/arch/mips/include/asm/atomic.h
2736+++ b/arch/mips/include/asm/atomic.h
2737@@ -21,6 +21,10 @@
2738 #include <asm/war.h>
2739 #include <asm/system.h>
2740
2741+#ifdef CONFIG_GENERIC_ATOMIC64
2742+#include <asm-generic/atomic64.h>
2743+#endif
2744+
2745 #define ATOMIC_INIT(i) { (i) }
2746
2747 /*
2748@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2749 */
2750 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2751
2752+#define atomic64_read_unchecked(v) atomic64_read(v)
2753+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2754+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2755+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2756+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2757+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2758+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2759+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2760+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2761+
2762 #endif /* CONFIG_64BIT */
2763
2764 /*
2765diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2766index b4db69f..8f3b093 100644
2767--- a/arch/mips/include/asm/cache.h
2768+++ b/arch/mips/include/asm/cache.h
2769@@ -9,10 +9,11 @@
2770 #ifndef _ASM_CACHE_H
2771 #define _ASM_CACHE_H
2772
2773+#include <linux/const.h>
2774 #include <kmalloc.h>
2775
2776 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2777-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2778+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2779
2780 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2781 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2782diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2783index 455c0ac..ad65fbe 100644
2784--- a/arch/mips/include/asm/elf.h
2785+++ b/arch/mips/include/asm/elf.h
2786@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2787 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2788 #endif
2789
2790+#ifdef CONFIG_PAX_ASLR
2791+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2792+
2793+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2794+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2795+#endif
2796+
2797 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2798 struct linux_binprm;
2799 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2800 int uses_interp);
2801
2802-struct mm_struct;
2803-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2804-#define arch_randomize_brk arch_randomize_brk
2805-
2806 #endif /* _ASM_ELF_H */
2807diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2808index da9bd7d..91aa7ab 100644
2809--- a/arch/mips/include/asm/page.h
2810+++ b/arch/mips/include/asm/page.h
2811@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2812 #ifdef CONFIG_CPU_MIPS32
2813 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2814 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2815- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2816+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2817 #else
2818 typedef struct { unsigned long long pte; } pte_t;
2819 #define pte_val(x) ((x).pte)
2820diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2821index 881d18b..cea38bc 100644
2822--- a/arch/mips/include/asm/pgalloc.h
2823+++ b/arch/mips/include/asm/pgalloc.h
2824@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2825 {
2826 set_pud(pud, __pud((unsigned long)pmd));
2827 }
2828+
2829+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2830+{
2831+ pud_populate(mm, pud, pmd);
2832+}
2833 #endif
2834
2835 /*
2836diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2837index 6018c80..7c37203 100644
2838--- a/arch/mips/include/asm/system.h
2839+++ b/arch/mips/include/asm/system.h
2840@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2841 */
2842 #define __ARCH_WANT_UNLOCKED_CTXSW
2843
2844-extern unsigned long arch_align_stack(unsigned long sp);
2845+#define arch_align_stack(x) ((x) & ~0xfUL)
2846
2847 #endif /* _ASM_SYSTEM_H */
2848diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2849index 0d85d8e..ec71487 100644
2850--- a/arch/mips/include/asm/thread_info.h
2851+++ b/arch/mips/include/asm/thread_info.h
2852@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2853 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2854 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2855 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2856+/* li takes a 32bit immediate */
2857+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2858 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2859
2860 #ifdef CONFIG_MIPS32_O32
2861@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2862 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2863 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2864 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2865+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2866+
2867+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2868
2869 /* work to do in syscall_trace_leave() */
2870-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2871+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2872
2873 /* work to do on interrupt/exception return */
2874 #define _TIF_WORK_MASK (0x0000ffef & \
2875 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2876 /* work to do on any return to u-space */
2877-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2878+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2879
2880 #endif /* __KERNEL__ */
2881
2882diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2883index 9fdd8bc..4bd7f1a 100644
2884--- a/arch/mips/kernel/binfmt_elfn32.c
2885+++ b/arch/mips/kernel/binfmt_elfn32.c
2886@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2887 #undef ELF_ET_DYN_BASE
2888 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2889
2890+#ifdef CONFIG_PAX_ASLR
2891+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2892+
2893+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2894+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2895+#endif
2896+
2897 #include <asm/processor.h>
2898 #include <linux/module.h>
2899 #include <linux/elfcore.h>
2900diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2901index ff44823..97f8906 100644
2902--- a/arch/mips/kernel/binfmt_elfo32.c
2903+++ b/arch/mips/kernel/binfmt_elfo32.c
2904@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2905 #undef ELF_ET_DYN_BASE
2906 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2907
2908+#ifdef CONFIG_PAX_ASLR
2909+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2910+
2911+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2912+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2913+#endif
2914+
2915 #include <asm/processor.h>
2916
2917 /*
2918diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2919index 7955409..ceaea7c 100644
2920--- a/arch/mips/kernel/process.c
2921+++ b/arch/mips/kernel/process.c
2922@@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2923 out:
2924 return pc;
2925 }
2926-
2927-/*
2928- * Don't forget that the stack pointer must be aligned on a 8 bytes
2929- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2930- */
2931-unsigned long arch_align_stack(unsigned long sp)
2932-{
2933- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2934- sp -= get_random_int() & ~PAGE_MASK;
2935-
2936- return sp & ALMASK;
2937-}
2938diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2939index 7786b60..3e38c72 100644
2940--- a/arch/mips/kernel/ptrace.c
2941+++ b/arch/mips/kernel/ptrace.c
2942@@ -529,6 +529,10 @@ static inline int audit_arch(void)
2943 return arch;
2944 }
2945
2946+#ifdef CONFIG_GRKERNSEC_SETXID
2947+extern void gr_delayed_cred_worker(void);
2948+#endif
2949+
2950 /*
2951 * Notification of system call entry/exit
2952 * - triggered by current->work.syscall_trace
2953@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2954 /* do the secure computing check first */
2955 secure_computing(regs->regs[2]);
2956
2957+#ifdef CONFIG_GRKERNSEC_SETXID
2958+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2959+ gr_delayed_cred_worker();
2960+#endif
2961+
2962 if (!(current->ptrace & PT_PTRACED))
2963 goto out;
2964
2965diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
2966index a632bc1..0b77c7c 100644
2967--- a/arch/mips/kernel/scall32-o32.S
2968+++ b/arch/mips/kernel/scall32-o32.S
2969@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
2970
2971 stack_done:
2972 lw t0, TI_FLAGS($28) # syscall tracing enabled?
2973- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2974+ li t1, _TIF_SYSCALL_WORK
2975 and t0, t1
2976 bnez t0, syscall_trace_entry # -> yes
2977
2978diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
2979index 3b5a5e9..e1ee86d 100644
2980--- a/arch/mips/kernel/scall64-64.S
2981+++ b/arch/mips/kernel/scall64-64.S
2982@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
2983
2984 sd a3, PT_R26(sp) # save a3 for syscall restarting
2985
2986- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2987+ li t1, _TIF_SYSCALL_WORK
2988 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
2989 and t0, t1, t0
2990 bnez t0, syscall_trace_entry
2991diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
2992index 6be6f70..1859577 100644
2993--- a/arch/mips/kernel/scall64-n32.S
2994+++ b/arch/mips/kernel/scall64-n32.S
2995@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
2996
2997 sd a3, PT_R26(sp) # save a3 for syscall restarting
2998
2999- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3000+ li t1, _TIF_SYSCALL_WORK
3001 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3002 and t0, t1, t0
3003 bnez t0, n32_syscall_trace_entry
3004diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3005index 5422855..74e63a3 100644
3006--- a/arch/mips/kernel/scall64-o32.S
3007+++ b/arch/mips/kernel/scall64-o32.S
3008@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3009 PTR 4b, bad_stack
3010 .previous
3011
3012- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3013+ li t1, _TIF_SYSCALL_WORK
3014 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3015 and t0, t1, t0
3016 bnez t0, trace_a_syscall
3017diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3018index 69ebd58..e4bff83 100644
3019--- a/arch/mips/mm/fault.c
3020+++ b/arch/mips/mm/fault.c
3021@@ -28,6 +28,23 @@
3022 #include <asm/highmem.h> /* For VMALLOC_END */
3023 #include <linux/kdebug.h>
3024
3025+#ifdef CONFIG_PAX_PAGEEXEC
3026+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3027+{
3028+ unsigned long i;
3029+
3030+ printk(KERN_ERR "PAX: bytes at PC: ");
3031+ for (i = 0; i < 5; i++) {
3032+ unsigned int c;
3033+ if (get_user(c, (unsigned int *)pc+i))
3034+ printk(KERN_CONT "???????? ");
3035+ else
3036+ printk(KERN_CONT "%08x ", c);
3037+ }
3038+ printk("\n");
3039+}
3040+#endif
3041+
3042 /*
3043 * This routine handles page faults. It determines the address,
3044 * and the problem, and then passes it off to one of the appropriate
3045diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3046index 302d779..7d35bf8 100644
3047--- a/arch/mips/mm/mmap.c
3048+++ b/arch/mips/mm/mmap.c
3049@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3050 do_color_align = 1;
3051
3052 /* requesting a specific address */
3053+
3054+#ifdef CONFIG_PAX_RANDMMAP
3055+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3056+#endif
3057+
3058 if (addr) {
3059 if (do_color_align)
3060 addr = COLOUR_ALIGN(addr, pgoff);
3061@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3062 addr = PAGE_ALIGN(addr);
3063
3064 vma = find_vma(mm, addr);
3065- if (TASK_SIZE - len >= addr &&
3066- (!vma || addr + len <= vma->vm_start))
3067+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3068 return addr;
3069 }
3070
3071@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3072 /* At this point: (!vma || addr < vma->vm_end). */
3073 if (TASK_SIZE - len < addr)
3074 return -ENOMEM;
3075- if (!vma || addr + len <= vma->vm_start)
3076+ if (check_heap_stack_gap(vmm, addr, len))
3077 return addr;
3078 addr = vma->vm_end;
3079 if (do_color_align)
3080@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3081 /* make sure it can fit in the remaining address space */
3082 if (likely(addr > len)) {
3083 vma = find_vma(mm, addr - len);
3084- if (!vma || addr <= vma->vm_start) {
3085+ if (check_heap_stack_gap(vmm, addr - len, len))
3086 /* cache the address as a hint for next time */
3087 return mm->free_area_cache = addr - len;
3088 }
3089@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3090 * return with success:
3091 */
3092 vma = find_vma(mm, addr);
3093- if (likely(!vma || addr + len <= vma->vm_start)) {
3094+ if (check_heap_stack_gap(vmm, addr, len)) {
3095 /* cache the address as a hint for next time */
3096 return mm->free_area_cache = addr;
3097 }
3098@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3099 mm->unmap_area = arch_unmap_area_topdown;
3100 }
3101 }
3102-
3103-static inline unsigned long brk_rnd(void)
3104-{
3105- unsigned long rnd = get_random_int();
3106-
3107- rnd = rnd << PAGE_SHIFT;
3108- /* 8MB for 32bit, 256MB for 64bit */
3109- if (TASK_IS_32BIT_ADDR)
3110- rnd = rnd & 0x7ffffful;
3111- else
3112- rnd = rnd & 0xffffffful;
3113-
3114- return rnd;
3115-}
3116-
3117-unsigned long arch_randomize_brk(struct mm_struct *mm)
3118-{
3119- unsigned long base = mm->brk;
3120- unsigned long ret;
3121-
3122- ret = PAGE_ALIGN(base + brk_rnd());
3123-
3124- if (ret < mm->brk)
3125- return mm->brk;
3126-
3127- return ret;
3128-}
3129diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3130index 967d144..db12197 100644
3131--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3132+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3133@@ -11,12 +11,14 @@
3134 #ifndef _ASM_PROC_CACHE_H
3135 #define _ASM_PROC_CACHE_H
3136
3137+#include <linux/const.h>
3138+
3139 /* L1 cache */
3140
3141 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3142 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3143-#define L1_CACHE_BYTES 16 /* bytes per entry */
3144 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3145+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3146 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3147
3148 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3149diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3150index bcb5df2..84fabd2 100644
3151--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3152+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3153@@ -16,13 +16,15 @@
3154 #ifndef _ASM_PROC_CACHE_H
3155 #define _ASM_PROC_CACHE_H
3156
3157+#include <linux/const.h>
3158+
3159 /*
3160 * L1 cache
3161 */
3162 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3163 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3164-#define L1_CACHE_BYTES 32 /* bytes per entry */
3165 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3166+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3167 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3168
3169 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3170diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3171index 4ce7a01..449202a 100644
3172--- a/arch/openrisc/include/asm/cache.h
3173+++ b/arch/openrisc/include/asm/cache.h
3174@@ -19,11 +19,13 @@
3175 #ifndef __ASM_OPENRISC_CACHE_H
3176 #define __ASM_OPENRISC_CACHE_H
3177
3178+#include <linux/const.h>
3179+
3180 /* FIXME: How can we replace these with values from the CPU...
3181 * they shouldn't be hard-coded!
3182 */
3183
3184-#define L1_CACHE_BYTES 16
3185 #define L1_CACHE_SHIFT 4
3186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3187
3188 #endif /* __ASM_OPENRISC_CACHE_H */
3189diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3190index 4054b31..a10c105 100644
3191--- a/arch/parisc/include/asm/atomic.h
3192+++ b/arch/parisc/include/asm/atomic.h
3193@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3194
3195 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3196
3197+#define atomic64_read_unchecked(v) atomic64_read(v)
3198+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3199+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3200+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3201+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3202+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3203+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3204+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3205+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3206+
3207 #endif /* !CONFIG_64BIT */
3208
3209
3210diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3211index 47f11c7..3420df2 100644
3212--- a/arch/parisc/include/asm/cache.h
3213+++ b/arch/parisc/include/asm/cache.h
3214@@ -5,6 +5,7 @@
3215 #ifndef __ARCH_PARISC_CACHE_H
3216 #define __ARCH_PARISC_CACHE_H
3217
3218+#include <linux/const.h>
3219
3220 /*
3221 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3222@@ -15,13 +16,13 @@
3223 * just ruin performance.
3224 */
3225 #ifdef CONFIG_PA20
3226-#define L1_CACHE_BYTES 64
3227 #define L1_CACHE_SHIFT 6
3228 #else
3229-#define L1_CACHE_BYTES 32
3230 #define L1_CACHE_SHIFT 5
3231 #endif
3232
3233+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3234+
3235 #ifndef __ASSEMBLY__
3236
3237 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3238diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3239index 19f6cb1..6c78cf2 100644
3240--- a/arch/parisc/include/asm/elf.h
3241+++ b/arch/parisc/include/asm/elf.h
3242@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3243
3244 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3245
3246+#ifdef CONFIG_PAX_ASLR
3247+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3248+
3249+#define PAX_DELTA_MMAP_LEN 16
3250+#define PAX_DELTA_STACK_LEN 16
3251+#endif
3252+
3253 /* This yields a mask that user programs can use to figure out what
3254 instruction set this CPU supports. This could be done in user space,
3255 but it's not easy, and we've already done it here. */
3256diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3257index fc987a1..6e068ef 100644
3258--- a/arch/parisc/include/asm/pgalloc.h
3259+++ b/arch/parisc/include/asm/pgalloc.h
3260@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3261 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3262 }
3263
3264+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3265+{
3266+ pgd_populate(mm, pgd, pmd);
3267+}
3268+
3269 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3270 {
3271 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3272@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3273 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3274 #define pmd_free(mm, x) do { } while (0)
3275 #define pgd_populate(mm, pmd, pte) BUG()
3276+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3277
3278 #endif
3279
3280diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3281index 22dadeb..f6c2be4 100644
3282--- a/arch/parisc/include/asm/pgtable.h
3283+++ b/arch/parisc/include/asm/pgtable.h
15a11c5b 3284@@ -210,6 +210,17 @@ struct vm_area_struct;
3285 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3286 #define PAGE_COPY PAGE_EXECREAD
3287 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3288+
3289+#ifdef CONFIG_PAX_PAGEEXEC
3290+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3291+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3292+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3293+#else
3294+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3295+# define PAGE_COPY_NOEXEC PAGE_COPY
3296+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3297+#endif
3298+
3299 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3300 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3301 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3302diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3303index 5e34ccf..672bc9c 100644
3304--- a/arch/parisc/kernel/module.c
3305+++ b/arch/parisc/kernel/module.c
15a11c5b 3306@@ -98,16 +98,38 @@
3307
3308 /* three functions to determine where in the module core
3309 * or init pieces the location is */
3310+static inline int in_init_rx(struct module *me, void *loc)
3311+{
3312+ return (loc >= me->module_init_rx &&
3313+ loc < (me->module_init_rx + me->init_size_rx));
3314+}
3315+
3316+static inline int in_init_rw(struct module *me, void *loc)
3317+{
3318+ return (loc >= me->module_init_rw &&
3319+ loc < (me->module_init_rw + me->init_size_rw));
3320+}
3321+
3322 static inline int in_init(struct module *me, void *loc)
3323 {
3324- return (loc >= me->module_init &&
3325- loc <= (me->module_init + me->init_size));
3326+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3327+}
3328+
3329+static inline int in_core_rx(struct module *me, void *loc)
3330+{
3331+ return (loc >= me->module_core_rx &&
3332+ loc < (me->module_core_rx + me->core_size_rx));
3333+}
3334+
3335+static inline int in_core_rw(struct module *me, void *loc)
3336+{
3337+ return (loc >= me->module_core_rw &&
3338+ loc < (me->module_core_rw + me->core_size_rw));
3339 }
3340
3341 static inline int in_core(struct module *me, void *loc)
3342 {
3343- return (loc >= me->module_core &&
3344- loc <= (me->module_core + me->core_size));
3345+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3346 }
3347
3348 static inline int in_local(struct module *me, void *loc)
fe2de317 3349@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3350 }
3351
3352 /* align things a bit */
3353- me->core_size = ALIGN(me->core_size, 16);
3354- me->arch.got_offset = me->core_size;
3355- me->core_size += gots * sizeof(struct got_entry);
3356+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3357+ me->arch.got_offset = me->core_size_rw;
3358+ me->core_size_rw += gots * sizeof(struct got_entry);
3359
3360- me->core_size = ALIGN(me->core_size, 16);
3361- me->arch.fdesc_offset = me->core_size;
3362- me->core_size += fdescs * sizeof(Elf_Fdesc);
3363+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3364+ me->arch.fdesc_offset = me->core_size_rw;
3365+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3366
3367 me->arch.got_max = gots;
3368 me->arch.fdesc_max = fdescs;
fe2de317 3369@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3370
3371 BUG_ON(value == 0);
3372
3373- got = me->module_core + me->arch.got_offset;
3374+ got = me->module_core_rw + me->arch.got_offset;
3375 for (i = 0; got[i].addr; i++)
3376 if (got[i].addr == value)
3377 goto out;
fe2de317 3378@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3379 #ifdef CONFIG_64BIT
3380 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3381 {
3382- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3383+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3384
3385 if (!value) {
3386 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
fe2de317 3387@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3388
3389 /* Create new one */
3390 fdesc->addr = value;
3391- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3392+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3393 return (Elf_Addr)fdesc;
3394 }
3395 #endif /* CONFIG_64BIT */
6e9df6a3 3396@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3397
3398 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3399 end = table + sechdrs[me->arch.unwind_section].sh_size;
3400- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3401+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3402
3403 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3404 me->arch.unwind_section, table, end, gp);
3405diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3406index c9b9322..02d8940 100644
3407--- a/arch/parisc/kernel/sys_parisc.c
3408+++ b/arch/parisc/kernel/sys_parisc.c
3409@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3410 /* At this point: (!vma || addr < vma->vm_end). */
3411 if (TASK_SIZE - len < addr)
3412 return -ENOMEM;
3413- if (!vma || addr + len <= vma->vm_start)
3414+ if (check_heap_stack_gap(vma, addr, len))
3415 return addr;
3416 addr = vma->vm_end;
3417 }
fe2de317 3418@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3419 /* At this point: (!vma || addr < vma->vm_end). */
3420 if (TASK_SIZE - len < addr)
3421 return -ENOMEM;
3422- if (!vma || addr + len <= vma->vm_start)
3423+ if (check_heap_stack_gap(vma, addr, len))
3424 return addr;
3425 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3426 if (addr < vma->vm_end) /* handle wraparound */
fe2de317 3427@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3428 if (flags & MAP_FIXED)
3429 return addr;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (filp) {
3435 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3436diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3437index f19e660..414fe24 100644
3438--- a/arch/parisc/kernel/traps.c
3439+++ b/arch/parisc/kernel/traps.c
3440@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3441
3442 down_read(&current->mm->mmap_sem);
3443 vma = find_vma(current->mm,regs->iaoq[0]);
3444- if (vma && (regs->iaoq[0] >= vma->vm_start)
3445- && (vma->vm_flags & VM_EXEC)) {
3446-
3447+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3448 fault_address = regs->iaoq[0];
3449 fault_space = regs->iasq[0];
3450
3451diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3452index 18162ce..94de376 100644
3453--- a/arch/parisc/mm/fault.c
3454+++ b/arch/parisc/mm/fault.c
3455@@ -15,6 +15,7 @@
3456 #include <linux/sched.h>
3457 #include <linux/interrupt.h>
3458 #include <linux/module.h>
3459+#include <linux/unistd.h>
3460
3461 #include <asm/uaccess.h>
3462 #include <asm/traps.h>
fe2de317 3463@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3464 static unsigned long
3465 parisc_acctyp(unsigned long code, unsigned int inst)
3466 {
3467- if (code == 6 || code == 16)
3468+ if (code == 6 || code == 7 || code == 16)
3469 return VM_EXEC;
3470
3471 switch (inst & 0xf0000000) {
fe2de317 3472@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3473 }
3474 #endif
3475
3476+#ifdef CONFIG_PAX_PAGEEXEC
3477+/*
3478+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3479+ *
3480+ * returns 1 when task should be killed
3481+ * 2 when rt_sigreturn trampoline was detected
3482+ * 3 when unpatched PLT trampoline was detected
3483+ */
3484+static int pax_handle_fetch_fault(struct pt_regs *regs)
3485+{
3486+
3487+#ifdef CONFIG_PAX_EMUPLT
3488+ int err;
3489+
3490+ do { /* PaX: unpatched PLT emulation */
3491+ unsigned int bl, depwi;
3492+
3493+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3494+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3495+
3496+ if (err)
3497+ break;
3498+
3499+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3500+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3501+
3502+ err = get_user(ldw, (unsigned int *)addr);
3503+ err |= get_user(bv, (unsigned int *)(addr+4));
3504+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3505+
3506+ if (err)
3507+ break;
3508+
3509+ if (ldw == 0x0E801096U &&
3510+ bv == 0xEAC0C000U &&
3511+ ldw2 == 0x0E881095U)
3512+ {
3513+ unsigned int resolver, map;
3514+
3515+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3516+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3517+ if (err)
3518+ break;
3519+
3520+ regs->gr[20] = instruction_pointer(regs)+8;
3521+ regs->gr[21] = map;
3522+ regs->gr[22] = resolver;
3523+ regs->iaoq[0] = resolver | 3UL;
3524+ regs->iaoq[1] = regs->iaoq[0] + 4;
3525+ return 3;
3526+ }
3527+ }
3528+ } while (0);
3529+#endif
3530+
3531+#ifdef CONFIG_PAX_EMUTRAMP
3532+
3533+#ifndef CONFIG_PAX_EMUSIGRT
3534+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3535+ return 1;
3536+#endif
3537+
3538+ do { /* PaX: rt_sigreturn emulation */
3539+ unsigned int ldi1, ldi2, bel, nop;
3540+
3541+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3542+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3543+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3544+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3545+
3546+ if (err)
3547+ break;
3548+
3549+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3550+ ldi2 == 0x3414015AU &&
3551+ bel == 0xE4008200U &&
3552+ nop == 0x08000240U)
3553+ {
3554+ regs->gr[25] = (ldi1 & 2) >> 1;
3555+ regs->gr[20] = __NR_rt_sigreturn;
3556+ regs->gr[31] = regs->iaoq[1] + 16;
3557+ regs->sr[0] = regs->iasq[1];
3558+ regs->iaoq[0] = 0x100UL;
3559+ regs->iaoq[1] = regs->iaoq[0] + 4;
3560+ regs->iasq[0] = regs->sr[2];
3561+ regs->iasq[1] = regs->sr[2];
3562+ return 2;
3563+ }
3564+ } while (0);
3565+#endif
3566+
3567+ return 1;
3568+}
3569+
6e9df6a3 3570+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3571+{
3572+ unsigned long i;
3573+
3574+ printk(KERN_ERR "PAX: bytes at PC: ");
3575+ for (i = 0; i < 5; i++) {
3576+ unsigned int c;
3577+ if (get_user(c, (unsigned int *)pc+i))
3578+ printk(KERN_CONT "???????? ");
3579+ else
3580+ printk(KERN_CONT "%08x ", c);
3581+ }
3582+ printk("\n");
3583+}
3584+#endif
3585+
3586 int fixup_exception(struct pt_regs *regs)
3587 {
3588 const struct exception_table_entry *fix;
3589@@ -192,8 +303,33 @@ good_area:
3590
3591 acc_type = parisc_acctyp(code,regs->iir);
3592
3593- if ((vma->vm_flags & acc_type) != acc_type)
3594+ if ((vma->vm_flags & acc_type) != acc_type) {
3595+
3596+#ifdef CONFIG_PAX_PAGEEXEC
3597+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3598+ (address & ~3UL) == instruction_pointer(regs))
3599+ {
3600+ up_read(&mm->mmap_sem);
3601+ switch (pax_handle_fetch_fault(regs)) {
3602+
3603+#ifdef CONFIG_PAX_EMUPLT
3604+ case 3:
3605+ return;
3606+#endif
3607+
3608+#ifdef CONFIG_PAX_EMUTRAMP
3609+ case 2:
3610+ return;
3611+#endif
3612+
3613+ }
3614+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3615+ do_group_exit(SIGKILL);
3616+ }
3617+#endif
3618+
3619 goto bad_area;
3620+ }
3621
3622 /*
3623 * If for any reason at all we couldn't handle the fault, make
3624diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3625index 02e41b5..ec6e26c 100644
3626--- a/arch/powerpc/include/asm/atomic.h
3627+++ b/arch/powerpc/include/asm/atomic.h
3628@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3629
3630 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3631
3632+#define atomic64_read_unchecked(v) atomic64_read(v)
3633+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3634+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3635+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3636+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3637+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3638+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3639+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3640+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3641+
3642 #endif /* __powerpc64__ */
3643
3644 #endif /* __KERNEL__ */
3645diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3646index 4b50941..5605819 100644
3647--- a/arch/powerpc/include/asm/cache.h
3648+++ b/arch/powerpc/include/asm/cache.h
3649@@ -3,6 +3,7 @@
3650
3651 #ifdef __KERNEL__
3652
3653+#include <linux/const.h>
3654
3655 /* bytes per L1 cache line */
3656 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3657@@ -22,7 +23,7 @@
3658 #define L1_CACHE_SHIFT 7
3659 #endif
3660
3661-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3662+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3663
3664 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3665
3666diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3667index 3bf9cca..e7457d0 100644
3668--- a/arch/powerpc/include/asm/elf.h
3669+++ b/arch/powerpc/include/asm/elf.h
3670@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3671 the loader. We need to make sure that it is out of the way of the program
3672 that it will "exec", and that there is sufficient room for the brk. */
3673
3674-extern unsigned long randomize_et_dyn(unsigned long base);
3675-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3676+#define ELF_ET_DYN_BASE (0x20000000)
3677+
3678+#ifdef CONFIG_PAX_ASLR
3679+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3680+
3681+#ifdef __powerpc64__
3682+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3683+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3684+#else
3685+#define PAX_DELTA_MMAP_LEN 15
3686+#define PAX_DELTA_STACK_LEN 15
3687+#endif
3688+#endif
3689
3690 /*
3691 * Our registers are always unsigned longs, whether we're a 32 bit
fe2de317 3692@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3693 (0x7ff >> (PAGE_SHIFT - 12)) : \
3694 (0x3ffff >> (PAGE_SHIFT - 12)))
3695
3696-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3697-#define arch_randomize_brk arch_randomize_brk
3698-
3699 #endif /* __KERNEL__ */
3700
3701 /*
3702diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3703index bca8fdc..61e9580 100644
3704--- a/arch/powerpc/include/asm/kmap_types.h
3705+++ b/arch/powerpc/include/asm/kmap_types.h
57199397 3706@@ -27,6 +27,7 @@ enum km_type {
3707 KM_PPC_SYNC_PAGE,
3708 KM_PPC_SYNC_ICACHE,
57199397 3709 KM_KDB,
3710+ KM_CLEARPAGE,
3711 KM_TYPE_NR
3712 };
3713
3714diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3715index d4a7f64..451de1c 100644
3716--- a/arch/powerpc/include/asm/mman.h
3717+++ b/arch/powerpc/include/asm/mman.h
3718@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3719 }
3720 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3721
3722-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3723+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3724 {
3725 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3726 }
fe2de317 3727diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
5e856224 3728index f072e97..b436dee 100644
3729--- a/arch/powerpc/include/asm/page.h
3730+++ b/arch/powerpc/include/asm/page.h
5e856224 3731@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3732 * and needs to be executable. This means the whole heap ends
3733 * up being executable.
3734 */
3735-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3736- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3737+#define VM_DATA_DEFAULT_FLAGS32 \
3738+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3739+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3740
3741 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3742 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
5e856224 3743@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3744 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3745 #endif
3746
3747+#define ktla_ktva(addr) (addr)
3748+#define ktva_ktla(addr) (addr)
3749+
3750 /*
3751 * Use the top bit of the higher-level page table entries to indicate whether
3752 * the entries we point to contain hugepages. This works because we know that
fe2de317 3753diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
5e856224 3754index fed85e6..da5c71b 100644
3755--- a/arch/powerpc/include/asm/page_64.h
3756+++ b/arch/powerpc/include/asm/page_64.h
5e856224 3757@@ -146,15 +146,18 @@ do { \
3758 * stack by default, so in the absence of a PT_GNU_STACK program header
3759 * we turn execute permission off.
3760 */
3761-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3762- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3763+#define VM_STACK_DEFAULT_FLAGS32 \
3764+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3765+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3766
3767 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3768 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3769
3770+#ifndef CONFIG_PAX_PAGEEXEC
3771 #define VM_STACK_DEFAULT_FLAGS \
3772 (is_32bit_task() ? \
3773 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3774+#endif
3775
3776 #include <asm-generic/getorder.h>
3777
3778diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3779index 292725c..f87ae14 100644
3780--- a/arch/powerpc/include/asm/pgalloc-64.h
3781+++ b/arch/powerpc/include/asm/pgalloc-64.h
3782@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3783 #ifndef CONFIG_PPC_64K_PAGES
3784
3785 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3786+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3787
3788 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3789 {
3790@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3791 pud_set(pud, (unsigned long)pmd);
3792 }
3793
3794+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3795+{
3796+ pud_populate(mm, pud, pmd);
3797+}
3798+
3799 #define pmd_populate(mm, pmd, pte_page) \
3800 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3801 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3802@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3803 #else /* CONFIG_PPC_64K_PAGES */
3804
3805 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3806+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3807
3808 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3809 pte_t *pte)
fe2de317 3810diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
5e856224 3811index 2e0e411..7899c68 100644
3812--- a/arch/powerpc/include/asm/pgtable.h
3813+++ b/arch/powerpc/include/asm/pgtable.h
3814@@ -2,6 +2,7 @@
3815 #define _ASM_POWERPC_PGTABLE_H
3816 #ifdef __KERNEL__
3817
3818+#include <linux/const.h>
3819 #ifndef __ASSEMBLY__
3820 #include <asm/processor.h> /* For TASK_SIZE */
3821 #include <asm/mmu.h>
3822diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3823index 4aad413..85d86bf 100644
3824--- a/arch/powerpc/include/asm/pte-hash32.h
3825+++ b/arch/powerpc/include/asm/pte-hash32.h
3826@@ -21,6 +21,7 @@
3827 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3828 #define _PAGE_USER 0x004 /* usermode access allowed */
3829 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
ae4e228f 3830+#define _PAGE_EXEC _PAGE_GUARDED
3831 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3832 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3833 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
fe2de317 3834diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
5e856224 3835index 7fdc2c0..e47a9b02d3 100644
3836--- a/arch/powerpc/include/asm/reg.h
3837+++ b/arch/powerpc/include/asm/reg.h
6e9df6a3 3838@@ -212,6 +212,7 @@
3839 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3840 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3841 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3842+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3843 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3844 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3845 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
fe2de317 3846diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
5e856224 3847index c377457..3c69fbc 100644
3848--- a/arch/powerpc/include/asm/system.h
3849+++ b/arch/powerpc/include/asm/system.h
5e856224 3850@@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3851 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3852 #endif
3853
3854-extern unsigned long arch_align_stack(unsigned long sp);
3855+#define arch_align_stack(x) ((x) & ~0xfUL)
3856
3857 /* Used in very early kernel initialization. */
3858 extern unsigned long reloc_offset(void);
3859diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3860index 96471494..60ed5a2 100644
3861--- a/arch/powerpc/include/asm/thread_info.h
3862+++ b/arch/powerpc/include/asm/thread_info.h
3863@@ -104,13 +104,15 @@ static inline struct thread_info *current_thread_info(void)
3864 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3865 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3866 #define TIF_SINGLESTEP 8 /* singlestepping active */
3867-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3868 #define TIF_SECCOMP 10 /* secure computing */
3869 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3870 #define TIF_NOERROR 12 /* Force successful syscall return */
3871 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3872 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3873 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
3874+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
3875+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3876+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3877
3878 /* as above, but as bit values */
3879 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3880@@ -128,8 +130,11 @@ static inline struct thread_info *current_thread_info(void)
3881 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3882 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3883 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3884+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3885+
3886 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3887- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3888+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3889+ _TIF_GRSEC_SETXID)
3890
3891 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3892 _TIF_NOTIFY_RESUME)
3893diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3894index bd0fb84..a42a14b 100644
3895--- a/arch/powerpc/include/asm/uaccess.h
3896+++ b/arch/powerpc/include/asm/uaccess.h
3897@@ -13,6 +13,8 @@
3898 #define VERIFY_READ 0
3899 #define VERIFY_WRITE 1
3900
3901+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3902+
3903 /*
3904 * The fs value determines whether argument validity checking should be
3905 * performed or not. If get_fs() == USER_DS, checking is performed, with
3906@@ -327,52 +329,6 @@ do { \
58c5fc13
MT
3907 extern unsigned long __copy_tofrom_user(void __user *to,
3908 const void __user *from, unsigned long size);
3909
3910-#ifndef __powerpc64__
3911-
3912-static inline unsigned long copy_from_user(void *to,
3913- const void __user *from, unsigned long n)
3914-{
3915- unsigned long over;
3916-
3917- if (access_ok(VERIFY_READ, from, n))
3918- return __copy_tofrom_user((__force void __user *)to, from, n);
3919- if ((unsigned long)from < TASK_SIZE) {
3920- over = (unsigned long)from + n - TASK_SIZE;
3921- return __copy_tofrom_user((__force void __user *)to, from,
3922- n - over) + over;
3923- }
3924- return n;
3925-}
3926-
3927-static inline unsigned long copy_to_user(void __user *to,
3928- const void *from, unsigned long n)
3929-{
3930- unsigned long over;
3931-
3932- if (access_ok(VERIFY_WRITE, to, n))
3933- return __copy_tofrom_user(to, (__force void __user *)from, n);
3934- if ((unsigned long)to < TASK_SIZE) {
3935- over = (unsigned long)to + n - TASK_SIZE;
3936- return __copy_tofrom_user(to, (__force void __user *)from,
3937- n - over) + over;
3938- }
3939- return n;
3940-}
3941-
3942-#else /* __powerpc64__ */
3943-
3944-#define __copy_in_user(to, from, size) \
3945- __copy_tofrom_user((to), (from), (size))
3946-
3947-extern unsigned long copy_from_user(void *to, const void __user *from,
3948- unsigned long n);
3949-extern unsigned long copy_to_user(void __user *to, const void *from,
3950- unsigned long n);
3951-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3952- unsigned long n);
3953-
3954-#endif /* __powerpc64__ */
3955-
3956 static inline unsigned long __copy_from_user_inatomic(void *to,
3957 const void __user *from, unsigned long n)
3958 {
fe2de317 3959@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3960 if (ret == 0)
3961 return 0;
3962 }
ae4e228f 3963+
3964+ if (!__builtin_constant_p(n))
3965+ check_object_size(to, n, false);
3966+
3967 return __copy_tofrom_user((__force void __user *)to, from, n);
3968 }
3969
fe2de317 3970@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3971 if (ret == 0)
3972 return 0;
3973 }
ae4e228f 3974+
3975+ if (!__builtin_constant_p(n))
3976+ check_object_size(from, n, true);
3977+
3978 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3979 }
3980
fe2de317 3981@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3982 return __copy_to_user_inatomic(to, from, size);
3983 }
3984
3985+#ifndef __powerpc64__
3986+
3987+static inline unsigned long __must_check copy_from_user(void *to,
3988+ const void __user *from, unsigned long n)
3989+{
3990+ unsigned long over;
3991+
ae4e228f 3992+ if ((long)n < 0)
3993+ return n;
3994+
3995+ if (access_ok(VERIFY_READ, from, n)) {
3996+ if (!__builtin_constant_p(n))
3997+ check_object_size(to, n, false);
3998+ return __copy_tofrom_user((__force void __user *)to, from, n);
3999+ }
4000+ if ((unsigned long)from < TASK_SIZE) {
4001+ over = (unsigned long)from + n - TASK_SIZE;
4002+ if (!__builtin_constant_p(n - over))
4003+ check_object_size(to, n - over, false);
4004+ return __copy_tofrom_user((__force void __user *)to, from,
4005+ n - over) + over;
4006+ }
4007+ return n;
4008+}
4009+
4010+static inline unsigned long __must_check copy_to_user(void __user *to,
4011+ const void *from, unsigned long n)
4012+{
4013+ unsigned long over;
4014+
ae4e228f 4015+ if ((long)n < 0)
4016+ return n;
4017+
4018+ if (access_ok(VERIFY_WRITE, to, n)) {
4019+ if (!__builtin_constant_p(n))
4020+ check_object_size(from, n, true);
4021+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4022+ }
4023+ if ((unsigned long)to < TASK_SIZE) {
4024+ over = (unsigned long)to + n - TASK_SIZE;
4025+ if (!__builtin_constant_p(n))
4026+ check_object_size(from, n - over, true);
4027+ return __copy_tofrom_user(to, (__force void __user *)from,
4028+ n - over) + over;
4029+ }
4030+ return n;
4031+}
4032+
4033+#else /* __powerpc64__ */
4034+
4035+#define __copy_in_user(to, from, size) \
4036+ __copy_tofrom_user((to), (from), (size))
4037+
ae4e228f 4038+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
58c5fc13 4039+{
ae4e228f 4040+ if ((long)n < 0 || n > INT_MAX)
4041+ return n;
4042+
4043+ if (!__builtin_constant_p(n))
4044+ check_object_size(to, n, false);
4045+
4046+ if (likely(access_ok(VERIFY_READ, from, n)))
4047+ n = __copy_from_user(to, from, n);
4048+ else
4049+ memset(to, 0, n);
4050+ return n;
4051+}
4052+
ae4e228f 4053+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
58c5fc13 4054+{
ae4e228f 4055+ if ((long)n < 0 || n > INT_MAX)
4056+ return n;
4057+
4058+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4059+ if (!__builtin_constant_p(n))
4060+ check_object_size(from, n, true);
4061+ n = __copy_to_user(to, from, n);
4062+ }
4063+ return n;
4064+}
4065+
4066+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4067+ unsigned long n);
4068+
4069+#endif /* __powerpc64__ */
4070+
4071 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4072
4073 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4074diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4075index 429983c..7af363b 100644
4076--- a/arch/powerpc/kernel/exceptions-64e.S
4077+++ b/arch/powerpc/kernel/exceptions-64e.S
6e9df6a3 4078@@ -587,6 +587,7 @@ storage_fault_common:
4079 std r14,_DAR(r1)
4080 std r15,_DSISR(r1)
4081 addi r3,r1,STACK_FRAME_OVERHEAD
4082+ bl .save_nvgprs
4083 mr r4,r14
4084 mr r5,r15
4085 ld r14,PACA_EXGEN+EX_R14(r13)
6e9df6a3 4086@@ -596,8 +597,7 @@ storage_fault_common:
4087 cmpdi r3,0
4088 bne- 1f
4089 b .ret_from_except_lite
4090-1: bl .save_nvgprs
4091- mr r5,r3
4092+1: mr r5,r3
4093 addi r3,r1,STACK_FRAME_OVERHEAD
4094 ld r4,_DAR(r1)
4095 bl .bad_page_fault
fe2de317 4096diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
5e856224 4097index 15c5a4f..22a4000 100644
4098--- a/arch/powerpc/kernel/exceptions-64s.S
4099+++ b/arch/powerpc/kernel/exceptions-64s.S
4c928ab7 4100@@ -1004,10 +1004,10 @@ handle_page_fault:
4101 11: ld r4,_DAR(r1)
4102 ld r5,_DSISR(r1)
4103 addi r3,r1,STACK_FRAME_OVERHEAD
4104+ bl .save_nvgprs
4105 bl .do_page_fault
4106 cmpdi r3,0
4107 beq+ 13f
4108- bl .save_nvgprs
4109 mr r5,r3
4110 addi r3,r1,STACK_FRAME_OVERHEAD
4111 lwz r4,_DAR(r1)
4c928ab7 4112diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
5e856224 4113index 01e2877..a1ba360 100644
4114--- a/arch/powerpc/kernel/irq.c
4115+++ b/arch/powerpc/kernel/irq.c
5e856224 4116@@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4117 host->ops = ops;
4118 host->of_node = of_node_get(of_node);
4119
4120- if (host->ops->match == NULL)
4121- host->ops->match = default_irq_host_match;
4122-
4123 raw_spin_lock_irqsave(&irq_big_lock, flags);
4124
4125 /* If it's a legacy controller, check for duplicates and
5e856224 4126@@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4127 */
4128 raw_spin_lock_irqsave(&irq_big_lock, flags);
4129 list_for_each_entry(h, &irq_hosts, link)
4130- if (h->ops->match(h, node)) {
4131+ if (h->ops->match) {
4132+ if (h->ops->match(h, node)) {
4133+ found = h;
4134+ break;
4135+ }
4136+ } else if (default_irq_host_match(h, node)) {
4137 found = h;
4138 break;
4139 }
4140diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4141index 0b6d796..d760ddb 100644
4142--- a/arch/powerpc/kernel/module_32.c
4143+++ b/arch/powerpc/kernel/module_32.c
4144@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4145 me->arch.core_plt_section = i;
4146 }
4147 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4148- printk("Module doesn't contain .plt or .init.plt sections.\n");
4149+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4150 return -ENOEXEC;
4151 }
4152
fe2de317 4153@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4154
4155 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4156 /* Init, or core PLT? */
4157- if (location >= mod->module_core
4158- && location < mod->module_core + mod->core_size)
4159+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4160+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4161 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4162- else
4163+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4164+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4165 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4166+ else {
4167+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4168+ return ~0UL;
4169+ }
4170
4171 /* Find this entry, or if that fails, the next avail. entry */
4172 while (entry->jump[0]) {
fe2de317 4173diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
5e856224 4174index d817ab0..b23b18e 100644
4175--- a/arch/powerpc/kernel/process.c
4176+++ b/arch/powerpc/kernel/process.c
5e856224 4177@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
4178 * Lookup NIP late so we have the best change of getting the
4179 * above info out without failing
4180 */
4181- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4182- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4183+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4184+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4185 #endif
4186 show_stack(current, (unsigned long *) regs->gpr[1]);
4187 if (!user_mode(regs))
5e856224 4188@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4189 newsp = stack[0];
4190 ip = stack[STACK_FRAME_LR_SAVE];
4191 if (!firstframe || ip != lr) {
4192- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4193+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4195 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4196- printk(" (%pS)",
4197+ printk(" (%pA)",
4198 (void *)current->ret_stack[curr_frame].ret);
4199 curr_frame--;
4200 }
5e856224 4201@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4202 struct pt_regs *regs = (struct pt_regs *)
4203 (sp + STACK_FRAME_OVERHEAD);
4204 lr = regs->link;
4205- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4206+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4207 regs->trap, (void *)regs->nip, (void *)lr);
4208 firstframe = 1;
4209 }
5e856224 4210@@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
58c5fc13 4211 }
6892158b 4212
4213 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4214-
4215-unsigned long arch_align_stack(unsigned long sp)
4216-{
4217- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4218- sp -= get_random_int() & ~PAGE_MASK;
4219- return sp & ~0xf;
4220-}
4221-
4222-static inline unsigned long brk_rnd(void)
4223-{
4224- unsigned long rnd = 0;
4225-
4226- /* 8MB for 32bit, 1GB for 64bit */
4227- if (is_32bit_task())
4228- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4229- else
4230- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4231-
4232- return rnd << PAGE_SHIFT;
4233-}
4234-
4235-unsigned long arch_randomize_brk(struct mm_struct *mm)
4236-{
4237- unsigned long base = mm->brk;
4238- unsigned long ret;
4239-
4240-#ifdef CONFIG_PPC_STD_MMU_64
4241- /*
4242- * If we are using 1TB segments and we are allowed to randomise
4243- * the heap, we can put it above 1TB so it is backed by a 1TB
4244- * segment. Otherwise the heap will be in the bottom 1TB
4245- * which always uses 256MB segments and this may result in a
4246- * performance penalty.
4247- */
4248- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4249- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4250-#endif
4251-
4252- ret = PAGE_ALIGN(base + brk_rnd());
4253-
4254- if (ret < mm->brk)
4255- return mm->brk;
4256-
4257- return ret;
4258-}
4259-
4260-unsigned long randomize_et_dyn(unsigned long base)
4261-{
4262- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4263-
4264- if (ret < base)
4265- return base;
4266-
4267- return ret;
4268-}
4269diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4270index 5b43325..94a5bb4 100644
4271--- a/arch/powerpc/kernel/ptrace.c
4272+++ b/arch/powerpc/kernel/ptrace.c
4273@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4274 return ret;
4275 }
4276
4277+#ifdef CONFIG_GRKERNSEC_SETXID
4278+extern void gr_delayed_cred_worker(void);
4279+#endif
4280+
4281 /*
4282 * We must return the syscall number to actually look up in the table.
4283 * This can be -1L to skip running any syscall at all.
4284@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4285
4286 secure_computing(regs->gpr[0]);
4287
4288+#ifdef CONFIG_GRKERNSEC_SETXID
4289+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4290+ gr_delayed_cred_worker();
4291+#endif
4292+
4293 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4294 tracehook_report_syscall_entry(regs))
4295 /*
4296@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4297 {
4298 int step;
4299
4300+#ifdef CONFIG_GRKERNSEC_SETXID
4301+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4302+ gr_delayed_cred_worker();
4303+#endif
4304+
4305 audit_syscall_exit(regs);
4306
4307 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
fe2de317 4308diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4c928ab7 4309index 836a5a1..27289a3 100644
4310--- a/arch/powerpc/kernel/signal_32.c
4311+++ b/arch/powerpc/kernel/signal_32.c
4312@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4313 /* Save user registers on the stack */
4314 frame = &rt_sf->uc.uc_mcontext;
4315 addr = frame;
4316- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4317+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4318 if (save_user_regs(regs, frame, 0, 1))
4319 goto badframe;
4320 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
fe2de317 4321diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4c928ab7 4322index a50b5ec..547078a 100644
4323--- a/arch/powerpc/kernel/signal_64.c
4324+++ b/arch/powerpc/kernel/signal_64.c
4c928ab7 4325@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4326 current->thread.fpscr.val = 0;
4327
4328 /* Set up to return from userspace. */
4329- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4330+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4331 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4332 } else {
4333 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
fe2de317 4334diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
5e856224 4335index c091527..5592625 100644
4336--- a/arch/powerpc/kernel/traps.c
4337+++ b/arch/powerpc/kernel/traps.c
4338@@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4339 return flags;
4340 }
4341
4342+extern void gr_handle_kernel_exploit(void);
4343+
4344 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4345 int signr)
15a11c5b 4346 {
4347@@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4348 panic("Fatal exception in interrupt");
4349 if (panic_on_oops)
4350 panic("Fatal exception");
5e856224 4351+
4352+ gr_handle_kernel_exploit();
4353+
4354 do_exit(signr);
4355 }
15a11c5b 4356
fe2de317 4357diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4c928ab7 4358index 7d14bb6..1305601 100644
4359--- a/arch/powerpc/kernel/vdso.c
4360+++ b/arch/powerpc/kernel/vdso.c
4c928ab7 4361@@ -35,6 +35,7 @@
4362 #include <asm/firmware.h>
4363 #include <asm/vdso.h>
4364 #include <asm/vdso_datapage.h>
4365+#include <asm/mman.h>
4366
4367 #include "setup.h"
4368
4c928ab7 4369@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4370 vdso_base = VDSO32_MBASE;
4371 #endif
4372
4373- current->mm->context.vdso_base = 0;
4374+ current->mm->context.vdso_base = ~0UL;
4375
4376 /* vDSO has a problem and was disabled, just don't "enable" it for the
4377 * process
4c928ab7 4378@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
58c5fc13 4379 vdso_base = get_unmapped_area(NULL, vdso_base,
4380 (vdso_pages << PAGE_SHIFT) +
4381 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4382- 0, 0);
4383+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4384 if (IS_ERR_VALUE(vdso_base)) {
4385 rc = vdso_base;
4386 goto fail_mmapsem;
4387diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4388index 5eea6f3..5d10396 100644
4389--- a/arch/powerpc/lib/usercopy_64.c
4390+++ b/arch/powerpc/lib/usercopy_64.c
4391@@ -9,22 +9,6 @@
4392 #include <linux/module.h>
4393 #include <asm/uaccess.h>
4394
4395-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4396-{
4397- if (likely(access_ok(VERIFY_READ, from, n)))
4398- n = __copy_from_user(to, from, n);
4399- else
4400- memset(to, 0, n);
4401- return n;
4402-}
4403-
4404-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4405-{
4406- if (likely(access_ok(VERIFY_WRITE, to, n)))
4407- n = __copy_to_user(to, from, n);
4408- return n;
4409-}
4410-
4411 unsigned long copy_in_user(void __user *to, const void __user *from,
4412 unsigned long n)
4413 {
fe2de317 4414@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4415 return n;
4416 }
4417
4418-EXPORT_SYMBOL(copy_from_user);
4419-EXPORT_SYMBOL(copy_to_user);
4420 EXPORT_SYMBOL(copy_in_user);
4421
fe2de317 4422diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
5e856224 4423index 2f0d1b0..36fb5cc 100644
4424--- a/arch/powerpc/mm/fault.c
4425+++ b/arch/powerpc/mm/fault.c
15a11c5b 4426@@ -32,6 +32,10 @@
ae4e228f 4427 #include <linux/perf_event.h>
bc901d79 4428 #include <linux/magic.h>
15a11c5b 4429 #include <linux/ratelimit.h>
4430+#include <linux/slab.h>
4431+#include <linux/pagemap.h>
4432+#include <linux/compiler.h>
4433+#include <linux/unistd.h>
4434
4435 #include <asm/firmware.h>
4436 #include <asm/page.h>
15a11c5b 4437@@ -43,6 +47,7 @@
4438 #include <asm/tlbflush.h>
4439 #include <asm/siginfo.h>
ae4e228f 4440 #include <mm/mmu_decl.h>
4441+#include <asm/ptrace.h>
4442
4443 #include "icswx.h"
4444
4445@@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4446 }
4447 #endif
4448
4449+#ifdef CONFIG_PAX_PAGEEXEC
4450+/*
4451+ * PaX: decide what to do with offenders (regs->nip = fault address)
4452+ *
4453+ * returns 1 when task should be killed
4454+ */
4455+static int pax_handle_fetch_fault(struct pt_regs *regs)
4456+{
4457+ return 1;
4458+}
4459+
6e9df6a3 4460+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
4461+{
4462+ unsigned long i;
4463+
4464+ printk(KERN_ERR "PAX: bytes at PC: ");
4465+ for (i = 0; i < 5; i++) {
4466+ unsigned int c;
ae4e228f 4467+ if (get_user(c, (unsigned int __user *)pc+i))
4468+ printk(KERN_CONT "???????? ");
4469+ else
4470+ printk(KERN_CONT "%08x ", c);
4471+ }
4472+ printk("\n");
4473+}
4474+#endif
4475+
4476 /*
4477 * Check whether the instruction at regs->nip is a store using
4478 * an update addressing form which will update r1.
5e856224 4479@@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4480 * indicate errors in DSISR but can validly be set in SRR1.
4481 */
4482 if (trap == 0x400)
4483- error_code &= 0x48200000;
4484+ error_code &= 0x58200000;
4485 else
4486 is_write = error_code & DSISR_ISSTORE;
4487 #else
5e856224 4488@@ -276,7 +308,7 @@ good_area:
4489 * "undefined". Of those that can be set, this is the only
4490 * one which seems bad.
4491 */
4492- if (error_code & 0x10000000)
4493+ if (error_code & DSISR_GUARDED)
4494 /* Guarded storage error. */
4495 goto bad_area;
4496 #endif /* CONFIG_8xx */
5e856224 4497@@ -291,7 +323,7 @@ good_area:
4498 * processors use the same I/D cache coherency mechanism
4499 * as embedded.
4500 */
4501- if (error_code & DSISR_PROTFAULT)
4502+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4503 goto bad_area;
4504 #endif /* CONFIG_PPC_STD_MMU */
4505
5e856224 4506@@ -360,6 +392,23 @@ bad_area:
4507 bad_area_nosemaphore:
4508 /* User mode accesses cause a SIGSEGV */
4509 if (user_mode(regs)) {
4510+
4511+#ifdef CONFIG_PAX_PAGEEXEC
4512+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4513+#ifdef CONFIG_PPC_STD_MMU
4514+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4515+#else
4516+ if (is_exec && regs->nip == address) {
4517+#endif
4518+ switch (pax_handle_fetch_fault(regs)) {
4519+ }
4520+
4521+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4522+ do_group_exit(SIGKILL);
4523+ }
4524+ }
4525+#endif
4526+
4527 _exception(SIGSEGV, regs, code, address);
4528 return 0;
4529 }
fe2de317 4530diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5e856224 4531index 67a42ed..1c7210c 100644
4532--- a/arch/powerpc/mm/mmap_64.c
4533+++ b/arch/powerpc/mm/mmap_64.c
5e856224 4534@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4535 */
4536 if (mmap_is_legacy()) {
4537 mm->mmap_base = TASK_UNMAPPED_BASE;
4538+
4539+#ifdef CONFIG_PAX_RANDMMAP
4540+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4541+ mm->mmap_base += mm->delta_mmap;
4542+#endif
4543+
4544 mm->get_unmapped_area = arch_get_unmapped_area;
4545 mm->unmap_area = arch_unmap_area;
4546 } else {
4547 mm->mmap_base = mmap_base();
4548+
4549+#ifdef CONFIG_PAX_RANDMMAP
4550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4551+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4552+#endif
4553+
4554 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4555 mm->unmap_area = arch_unmap_area_topdown;
4556 }
fe2de317 4557diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4c928ab7 4558index 73709f7..6b90313 100644
4559--- a/arch/powerpc/mm/slice.c
4560+++ b/arch/powerpc/mm/slice.c
4561@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4562 if ((mm->task_size - len) < addr)
4563 return 0;
4564 vma = find_vma(mm, addr);
4565- return (!vma || (addr + len) <= vma->vm_start);
4566+ return check_heap_stack_gap(vma, addr, len);
4567 }
4568
4569 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4570@@ -256,7 +256,7 @@ full_search:
4571 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4572 continue;
4573 }
4574- if (!vma || addr + len <= vma->vm_start) {
4575+ if (check_heap_stack_gap(vma, addr, len)) {
4576 /*
4577 * Remember the place where we stopped the search:
4578 */
fe2de317 4579@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4580 }
4581 }
4582
4583- addr = mm->mmap_base;
4584- while (addr > len) {
4585+ if (mm->mmap_base < len)
4586+ addr = -ENOMEM;
4587+ else
4588+ addr = mm->mmap_base - len;
4589+
4590+ while (!IS_ERR_VALUE(addr)) {
4591 /* Go down by chunk size */
4592- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4593+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4594
4595 /* Check for hit with different page size */
4596 mask = slice_range_to_mask(addr, len);
fe2de317 4597@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4598 * return with success:
4599 */
4600 vma = find_vma(mm, addr);
4601- if (!vma || (addr + len) <= vma->vm_start) {
4602+ if (check_heap_stack_gap(vma, addr, len)) {
4603 /* remember the address as a hint for next time */
4604 if (use_cache)
4605 mm->free_area_cache = addr;
fe2de317 4606@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4607 mm->cached_hole_size = vma->vm_start - addr;
4608
4609 /* try just below the current vma->vm_start */
4610- addr = vma->vm_start;
4611+ addr = skip_heap_stack_gap(vma, len);
4612 }
4613
4614 /*
fe2de317 4615@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4616 if (fixed && addr > (mm->task_size - len))
4617 return -EINVAL;
4618
4619+#ifdef CONFIG_PAX_RANDMMAP
4620+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4621+ addr = 0;
4622+#endif
4623+
4624 /* If hint, make sure it matches our alignment restrictions */
4625 if (!fixed && addr) {
4626 addr = _ALIGN_UP(addr, 1ul << pshift);
4627diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4628index 8517d2a..d2738d4 100644
4629--- a/arch/s390/include/asm/atomic.h
4630+++ b/arch/s390/include/asm/atomic.h
4631@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4632 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4633 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4634
4635+#define atomic64_read_unchecked(v) atomic64_read(v)
4636+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4637+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4638+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4639+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4640+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4641+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4642+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4643+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4644+
4645 #define smp_mb__before_atomic_dec() smp_mb()
4646 #define smp_mb__after_atomic_dec() smp_mb()
4647 #define smp_mb__before_atomic_inc() smp_mb()
4648diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4649index 2a30d5a..5e5586f 100644
4650--- a/arch/s390/include/asm/cache.h
4651+++ b/arch/s390/include/asm/cache.h
4652@@ -11,8 +11,10 @@
4653 #ifndef __ARCH_S390_CACHE_H
4654 #define __ARCH_S390_CACHE_H
4655
4656-#define L1_CACHE_BYTES 256
4657+#include <linux/const.h>
4658+
4659 #define L1_CACHE_SHIFT 8
4660+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4661 #define NET_SKB_PAD 32
4662
4663 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
fe2de317 4664diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4c928ab7 4665index 547f1a6..0b22b53 100644
4666--- a/arch/s390/include/asm/elf.h
4667+++ b/arch/s390/include/asm/elf.h
4668@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4669 the loader. We need to make sure that it is out of the way of the program
ae4e228f 4670 that it will "exec", and that there is sufficient room for the brk. */
58c5fc13 4671
4672-extern unsigned long randomize_et_dyn(unsigned long base);
4673-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4674+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4675+
4676+#ifdef CONFIG_PAX_ASLR
4677+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
58c5fc13 4678+
4679+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4680+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
ae4e228f 4681+#endif
16454cff 4682
4683 /* This yields a mask that user programs can use to figure out what
4684 instruction set this CPU supports. */
6e9df6a3 4685@@ -211,7 +217,4 @@ struct linux_binprm;
4686 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4687 int arch_setup_additional_pages(struct linux_binprm *, int);
4688
4689-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4690-#define arch_randomize_brk arch_randomize_brk
4691-
4692 #endif
fe2de317 4693diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
5e856224 4694index d73cc6b..1a296ad 100644
4695--- a/arch/s390/include/asm/system.h
4696+++ b/arch/s390/include/asm/system.h
5e856224 4697@@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4698 extern void (*_machine_halt)(void);
4699 extern void (*_machine_power_off)(void);
58c5fc13 4700
4701-extern unsigned long arch_align_stack(unsigned long sp);
4702+#define arch_align_stack(x) ((x) & ~0xfUL)
4703
4704 static inline int tprot(unsigned long addr)
4705 {
4706diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4707index 2b23885..e136e31 100644
4708--- a/arch/s390/include/asm/uaccess.h
4709+++ b/arch/s390/include/asm/uaccess.h
15a11c5b 4710@@ -235,6 +235,10 @@ static inline unsigned long __must_check
4711 copy_to_user(void __user *to, const void *from, unsigned long n)
4712 {
4713 might_fault();
4714+
4715+ if ((long)n < 0)
4716+ return n;
4717+
4718 if (access_ok(VERIFY_WRITE, to, n))
4719 n = __copy_to_user(to, from, n);
4720 return n;
fe2de317 4721@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4722 static inline unsigned long __must_check
4723 __copy_from_user(void *to, const void __user *from, unsigned long n)
4724 {
4725+ if ((long)n < 0)
4726+ return n;
4727+
4728 if (__builtin_constant_p(n) && (n <= 256))
4729 return uaccess.copy_from_user_small(n, from, to);
4730 else
fe2de317 4731@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4732 unsigned int sz = __compiletime_object_size(to);
4733
4734 might_fault();
4735+
4736+ if ((long)n < 0)
4737+ return n;
4738+
4739 if (unlikely(sz != -1 && sz < n)) {
4740 copy_from_user_overflow();
4741 return n;
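The three uaccess.h hunks above add the same guard to copy_to_user(), __copy_from_user() and copy_from_user(): a byte count whose sign bit is set — in practice an underflowed length calculation — is returned unchanged as "bytes not copied" instead of being handed to the low-level copy routines. A minimal user-space sketch of the idea (not patch code):

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)	/* sign bit set: almost certainly an underflowed size */
		return n;	/* refuse; report everything as uncopied */
	/* the kernel would call the real __copy_*_user() here */
	return 0;
}

int main(void)
{
	unsigned long n = 16;
	n -= 24;		/* simulate "len - header" going negative */
	printf("uncopied: %lu\n", guarded_copy(n));
	return 0;
}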
4742diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4743index dfcb343..eda788a 100644
4744--- a/arch/s390/kernel/module.c
4745+++ b/arch/s390/kernel/module.c
4746@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4747
4748 /* Increase core size by size of got & plt and set start
4749 offsets for got and plt. */
4750- me->core_size = ALIGN(me->core_size, 4);
4751- me->arch.got_offset = me->core_size;
4752- me->core_size += me->arch.got_size;
4753- me->arch.plt_offset = me->core_size;
4754- me->core_size += me->arch.plt_size;
4755+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4756+ me->arch.got_offset = me->core_size_rw;
4757+ me->core_size_rw += me->arch.got_size;
4758+ me->arch.plt_offset = me->core_size_rx;
4759+ me->core_size_rx += me->arch.plt_size;
4760 return 0;
4761 }
4762
fe2de317 4763@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4764 if (info->got_initialized == 0) {
4765 Elf_Addr *gotent;
4766
4767- gotent = me->module_core + me->arch.got_offset +
4768+ gotent = me->module_core_rw + me->arch.got_offset +
4769 info->got_offset;
4770 *gotent = val;
4771 info->got_initialized = 1;
fe2de317 4772@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4773 else if (r_type == R_390_GOTENT ||
4774 r_type == R_390_GOTPLTENT)
4775 *(unsigned int *) loc =
4776- (val + (Elf_Addr) me->module_core - loc) >> 1;
4777+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4778 else if (r_type == R_390_GOT64 ||
4779 r_type == R_390_GOTPLT64)
4780 *(unsigned long *) loc = val;
fe2de317 4781@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4782 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4783 if (info->plt_initialized == 0) {
4784 unsigned int *ip;
4785- ip = me->module_core + me->arch.plt_offset +
4786+ ip = me->module_core_rx + me->arch.plt_offset +
4787 info->plt_offset;
4788 #ifndef CONFIG_64BIT
4789 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
fe2de317 4790@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4791 val - loc + 0xffffUL < 0x1ffffeUL) ||
4792 (r_type == R_390_PLT32DBL &&
4793 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4794- val = (Elf_Addr) me->module_core +
4795+ val = (Elf_Addr) me->module_core_rx +
4796 me->arch.plt_offset +
4797 info->plt_offset;
4798 val += rela->r_addend - loc;
fe2de317 4799@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4800 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4801 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4802 val = val + rela->r_addend -
4803- ((Elf_Addr) me->module_core + me->arch.got_offset);
4804+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4805 if (r_type == R_390_GOTOFF16)
4806 *(unsigned short *) loc = val;
4807 else if (r_type == R_390_GOTOFF32)
fe2de317 4808@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 break;
4810 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4811 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4812- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4813+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4814 rela->r_addend - loc;
4815 if (r_type == R_390_GOTPC)
4816 *(unsigned int *) loc = val;
fe2de317 4817diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
5e856224 4818index e795933..b32563c 100644
4819--- a/arch/s390/kernel/process.c
4820+++ b/arch/s390/kernel/process.c
5e856224 4821@@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4822 }
4823 return 0;
4824 }
4825-
4826-unsigned long arch_align_stack(unsigned long sp)
4827-{
4828- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4829- sp -= get_random_int() & ~PAGE_MASK;
4830- return sp & ~0xf;
4831-}
4832-
4833-static inline unsigned long brk_rnd(void)
4834-{
4835- /* 8MB for 32bit, 1GB for 64bit */
4836- if (is_32bit_task())
4837- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4838- else
4839- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4840-}
4841-
4842-unsigned long arch_randomize_brk(struct mm_struct *mm)
4843-{
4844- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4845-
4846- if (ret < mm->brk)
4847- return mm->brk;
4848- return ret;
4849-}
4850-
4851-unsigned long randomize_et_dyn(unsigned long base)
4852-{
4853- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4854-
4855- if (!(current->flags & PF_RANDOMIZE))
4856- return base;
4857- if (ret < base)
4858- return base;
4859- return ret;
4860-}
fe2de317 4861diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4c928ab7 4862index a0155c0..34cc491 100644
4863--- a/arch/s390/mm/mmap.c
4864+++ b/arch/s390/mm/mmap.c
4c928ab7 4865@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4866 */
4867 if (mmap_is_legacy()) {
4868 mm->mmap_base = TASK_UNMAPPED_BASE;
4869+
4870+#ifdef CONFIG_PAX_RANDMMAP
4871+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4872+ mm->mmap_base += mm->delta_mmap;
4873+#endif
4874+
4875 mm->get_unmapped_area = arch_get_unmapped_area;
4876 mm->unmap_area = arch_unmap_area;
4877 } else {
4878 mm->mmap_base = mmap_base();
4879+
4880+#ifdef CONFIG_PAX_RANDMMAP
4881+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4882+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4883+#endif
4884+
4885 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4886 mm->unmap_area = arch_unmap_area_topdown;
4887 }
4c928ab7 4888@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4889 */
4890 if (mmap_is_legacy()) {
4891 mm->mmap_base = TASK_UNMAPPED_BASE;
4892+
4893+#ifdef CONFIG_PAX_RANDMMAP
4894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4895+ mm->mmap_base += mm->delta_mmap;
4896+#endif
4897+
4898 mm->get_unmapped_area = s390_get_unmapped_area;
4899 mm->unmap_area = arch_unmap_area;
4900 } else {
4901 mm->mmap_base = mmap_base();
4902+
4903+#ifdef CONFIG_PAX_RANDMMAP
4904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4905+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4906+#endif
4907+
4908 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4909 mm->unmap_area = arch_unmap_area_topdown;
4910 }
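Both arch_pick_mmap_layout() variants in the hunk above get the same adjustment: when the task has MF_PAX_RANDMMAP set, the legacy bottom-up base is moved up by delta_mmap and the top-down base is moved down by delta_mmap + delta_stack. A rough stand-alone illustration of that arithmetic (all constants invented for the sketch):

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x20000000UL	/* placeholder */
#define TOPDOWN_BASE       0x7f000000UL	/* placeholder for mmap_base() */

int main(void)
{
	unsigned long delta_mmap  = 0x00ff0000UL;	/* pretend random, page aligned */
	unsigned long delta_stack = 0x000ff000UL;

	unsigned long legacy  = TASK_UNMAPPED_BASE + delta_mmap;
	unsigned long topdown = TOPDOWN_BASE - (delta_mmap + delta_stack);

	printf("legacy base:   %#lx\n", legacy);
	printf("top-down base: %#lx\n", topdown);
	return 0;
}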
4911diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4912index ae3d59f..f65f075 100644
4913--- a/arch/score/include/asm/cache.h
4914+++ b/arch/score/include/asm/cache.h
4915@@ -1,7 +1,9 @@
4916 #ifndef _ASM_SCORE_CACHE_H
4917 #define _ASM_SCORE_CACHE_H
4918
4919+#include <linux/const.h>
4920+
4921 #define L1_CACHE_SHIFT 4
4922-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4923+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4924
4925 #endif /* _ASM_SCORE_CACHE_H */
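The cache.h changes in this patch all follow one pattern: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT via _AC(1,UL), so C code sees an unsigned long constant (1UL << shift) while assembly, which cannot parse the UL suffix, still sees a plain 1. The helper comes from include/linux/const.h, roughly as below (paraphrased, with a small demo added):

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("%lu\n", L1_CACHE_BYTES);	/* 16, as an unsigned long */
	return 0;
}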
4926diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4927index 589d5c7..669e274 100644
4928--- a/arch/score/include/asm/system.h
4929+++ b/arch/score/include/asm/system.h
4930@@ -17,7 +17,7 @@ do { \
4931 #define finish_arch_switch(prev) do {} while (0)
4932
4933 typedef void (*vi_handler_t)(void);
4934-extern unsigned long arch_align_stack(unsigned long sp);
4935+#define arch_align_stack(x) (x)
4936
4937 #define mb() barrier()
4938 #define rmb() barrier()
4939diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4940index 25d0803..d6c8e36 100644
4941--- a/arch/score/kernel/process.c
4942+++ b/arch/score/kernel/process.c
4943@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4944
4945 return task_pt_regs(task)->cp0_epc;
4946 }
4947-
4948-unsigned long arch_align_stack(unsigned long sp)
4949-{
4950- return sp;
4951-}
4952diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4953index ef9e555..331bd29 100644
4954--- a/arch/sh/include/asm/cache.h
4955+++ b/arch/sh/include/asm/cache.h
4956@@ -9,10 +9,11 @@
4957 #define __ASM_SH_CACHE_H
4958 #ifdef __KERNEL__
4959
4960+#include <linux/const.h>
4961 #include <linux/init.h>
4962 #include <cpu/cache.h>
4963
4964-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4965+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4966
4967 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4968
4969diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4970index afeb710..d1d1289 100644
4971--- a/arch/sh/mm/mmap.c
4972+++ b/arch/sh/mm/mmap.c
4973@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4974 addr = PAGE_ALIGN(addr);
4975
4976 vma = find_vma(mm, addr);
4977- if (TASK_SIZE - len >= addr &&
4978- (!vma || addr + len <= vma->vm_start))
4979+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4980 return addr;
4981 }
efbe55a5 4982
4983@@ -106,7 +105,7 @@ full_search:
4984 }
4985 return -ENOMEM;
4986 }
4987- if (likely(!vma || addr + len <= vma->vm_start)) {
4988+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4989 /*
4990 * Remember the place where we stopped the search:
4991 */
fe2de317 4992@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4993 addr = PAGE_ALIGN(addr);
4994
4995 vma = find_vma(mm, addr);
4996- if (TASK_SIZE - len >= addr &&
4997- (!vma || addr + len <= vma->vm_start))
4998+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4999 return addr;
5000 }
5001
fe2de317 5002@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5003 /* make sure it can fit in the remaining address space */
5004 if (likely(addr > len)) {
5005 vma = find_vma(mm, addr-len);
5006- if (!vma || addr <= vma->vm_start) {
5007+ if (check_heap_stack_gap(vma, addr - len, len)) {
5008 /* remember the address as a hint for next time */
5009 return (mm->free_area_cache = addr-len);
5010 }
fe2de317 5011@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5012 if (unlikely(mm->mmap_base < len))
5013 goto bottomup;
5014
5015- addr = mm->mmap_base-len;
5016- if (do_colour_align)
5017- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5018+ addr = mm->mmap_base - len;
5019
5020 do {
5021+ if (do_colour_align)
5022+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5023 /*
5024 * Lookup failure means no vma is above this address,
5025 * else if new region fits below vma->vm_start,
5026 * return with success:
5027 */
5028 vma = find_vma(mm, addr);
5029- if (likely(!vma || addr+len <= vma->vm_start)) {
5030+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5031 /* remember the address as a hint for next time */
5032 return (mm->free_area_cache = addr);
5033 }
fe2de317 5034@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5035 mm->cached_hole_size = vma->vm_start - addr;
5036
5037 /* try just below the current vma->vm_start */
5038- addr = vma->vm_start-len;
5039- if (do_colour_align)
5040- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5041- } while (likely(len < vma->vm_start));
5042+ addr = skip_heap_stack_gap(vma, len);
5043+ } while (!IS_ERR_VALUE(addr));
5044
5045 bottomup:
5046 /*
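In the arch_get_unmapped_area*() hunks above, the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap() and the retry step by skip_heap_stack_gap(); both helpers are introduced elsewhere in this patch and additionally keep a guard gap between a new mapping and an adjacent stack VMA. Their real definitions are not shown here — the simplified sketch below (own types, invented gap size) only conveys the shape of the check:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN	0x1u
#define HEAP_STACK_GAP	(64UL * 1024)	/* hypothetical guard size */

struct vma { unsigned long vm_start; unsigned int vm_flags; };

static bool check_heap_stack_gap(const struct vma *vma,
				 unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)	/* next VMA is a stack: demand a gap */
		gap = HEAP_STACK_GAP;
	return addr + len + gap <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { 0x70000000UL, VM_GROWSDOWN };

	printf("%d\n", check_heap_stack_gap(&stack, 0x6ff00000UL, 0x1000));	/* 1: gap kept */
	printf("%d\n", check_heap_stack_gap(&stack, 0x6ffff000UL, 0x1000));	/* 0: too close */
	return 0;
}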
fe2de317 5047diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4c928ab7 5048index eddcfb3..b117d90 100644
5049--- a/arch/sparc/Makefile
5050+++ b/arch/sparc/Makefile
5051@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5052 # Export what is needed by arch/sparc/boot/Makefile
5053 export VMLINUX_INIT VMLINUX_MAIN
5054 VMLINUX_INIT := $(head-y) $(init-y)
5055-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5056+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5057 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5058 VMLINUX_MAIN += $(drivers-y) $(net-y)
5059
5060diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5061index 9f421df..b81fc12 100644
5062--- a/arch/sparc/include/asm/atomic_64.h
5063+++ b/arch/sparc/include/asm/atomic_64.h
57199397 5064@@ -14,18 +14,40 @@
5065 #define ATOMIC64_INIT(i) { (i) }
5066
57199397 5067 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5068+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5069+{
5070+ return v->counter;
5071+}
57199397 5072 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5073+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5074+{
5075+ return v->counter;
5076+}
5077
5078 #define atomic_set(v, i) (((v)->counter) = i)
5079+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5080+{
5081+ v->counter = i;
5082+}
58c5fc13 5083 #define atomic64_set(v, i) (((v)->counter) = i)
5084+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5085+{
5086+ v->counter = i;
5087+}
5088
5089 extern void atomic_add(int, atomic_t *);
5090+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5091 extern void atomic64_add(long, atomic64_t *);
5092+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5093 extern void atomic_sub(int, atomic_t *);
5094+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5095 extern void atomic64_sub(long, atomic64_t *);
5096+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5097
5098 extern int atomic_add_ret(int, atomic_t *);
5099+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5100 extern long atomic64_add_ret(long, atomic64_t *);
5101+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
ae4e228f 5102 extern int atomic_sub_ret(int, atomic_t *);
57199397 5103 extern long atomic64_sub_ret(long, atomic64_t *);
ae4e228f 5104
fe2de317 5105@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
57199397 5106 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5107
5108 #define atomic_inc_return(v) atomic_add_ret(1, v)
5109+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5110+{
5111+ return atomic_add_ret_unchecked(1, v);
5112+}
ae4e228f 5113 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5114+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5115+{
5116+ return atomic64_add_ret_unchecked(1, v);
5117+}
5118
5119 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5120 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5121
5122 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5123+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5124+{
5125+ return atomic_add_ret_unchecked(i, v);
5126+}
5127 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5128+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5129+{
5130+ return atomic64_add_ret_unchecked(i, v);
5131+}
5132
5133 /*
15a11c5b 5134 * atomic_inc_and_test - increment and test
fe2de317 5135@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5136 * other cases.
5137 */
5138 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
15a11c5b
MT
5139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5140+{
5141+ return atomic_inc_return_unchecked(v) == 0;
5142+}
5143 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5144
5145 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
fe2de317 5146@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5147 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5148
5149 #define atomic_inc(v) atomic_add(1, v)
5150+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5151+{
5152+ atomic_add_unchecked(1, v);
5153+}
58c5fc13 5154 #define atomic64_inc(v) atomic64_add(1, v)
5155+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5156+{
5157+ atomic64_add_unchecked(1, v);
5158+}
5159
5160 #define atomic_dec(v) atomic_sub(1, v)
5161+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5162+{
5163+ atomic_sub_unchecked(1, v);
5164+}
ae4e228f 5165 #define atomic64_dec(v) atomic64_sub(1, v)
5166+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5167+{
5168+ atomic64_sub_unchecked(1, v);
5169+}
5170
5171 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5172 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5173
5174 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5175+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5176+{
5177+ return cmpxchg(&v->counter, old, new);
5178+}
8308f9c9 5179 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5180+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5181+{
5182+ return xchg(&v->counter, new);
5183+}
58c5fc13 5184
6e9df6a3 5185 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5186 {
5187- int c, old;
5188+ int c, old, new;
5189 c = atomic_read(v);
5190 for (;;) {
5191- if (unlikely(c == (u)))
5192+ if (unlikely(c == u))
5193 break;
5194- old = atomic_cmpxchg((v), c, c + (a));
5195+
5196+ asm volatile("addcc %2, %0, %0\n"
5197+
5198+#ifdef CONFIG_PAX_REFCOUNT
5199+ "tvs %%icc, 6\n"
5200+#endif
5201+
5202+ : "=r" (new)
5203+ : "0" (c), "ir" (a)
5204+ : "cc");
5205+
5206+ old = atomic_cmpxchg(v, c, new);
5207 if (likely(old == c))
5208 break;
5209 c = old;
fe2de317 5210@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5211 #define atomic64_cmpxchg(v, o, n) \
5212 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5213 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5214+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5215+{
5216+ return xchg(&v->counter, new);
5217+}
58c5fc13 5218
57199397 5219 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5220 {
5221- long c, old;
5222+ long c, old, new;
5223 c = atomic64_read(v);
5224 for (;;) {
5225- if (unlikely(c == (u)))
5226+ if (unlikely(c == u))
5227 break;
5228- old = atomic64_cmpxchg((v), c, c + (a));
5229+
5230+ asm volatile("addcc %2, %0, %0\n"
5231+
5232+#ifdef CONFIG_PAX_REFCOUNT
5233+ "tvs %%xcc, 6\n"
5234+#endif
5235+
5236+ : "=r" (new)
5237+ : "0" (c), "ir" (a)
5238+ : "cc");
5239+
5240+ old = atomic64_cmpxchg(v, c, new);
5241 if (likely(old == c))
5242 break;
5243 c = old;
5244 }
5245- return c != (u);
5246+ return c != u;
5247 }
5248
5249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
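The hunk above is the sparc64 half of the PAX_REFCOUNT scheme: regular atomic_t operations switch from add/sub to addcc/subcc and, under CONFIG_PAX_REFCOUNT, a "tvs %icc, 6" (or %xcc for the 64-bit variants) instruction traps as soon as the signed result overflows; the trap is routed to pax_report_refcount_overflow() in the traps_64.c hunk further down. The new atomic_unchecked_t/atomic64_unchecked_t operations keep the plain wrapping behaviour for counters that may legitimately wrap. A portable stand-in for "catch the wrap instead of rolling over" looks roughly like this (illustration only, using a GCC/Clang builtin in place of addcc+tvs):

#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *v)
{
	int sum;

	if (__builtin_add_overflow(*v, 1, &sum))
		return -1;		/* the kernel would trap and report here */
	*v = sum;
	return 0;
}

int main(void)
{
	int ref = INT_MAX;

	printf("overflow caught: %s\n",
	       refcount_inc_checked(&ref) ? "yes" : "no");
	return 0;
}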
fe2de317 5250diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4c928ab7 5251index 69358b5..9d0d492 100644
5252--- a/arch/sparc/include/asm/cache.h
5253+++ b/arch/sparc/include/asm/cache.h
5254@@ -7,10 +7,12 @@
5255 #ifndef _SPARC_CACHE_H
5256 #define _SPARC_CACHE_H
5257
5258+#include <linux/const.h>
5259+
5260 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5261
5262 #define L1_CACHE_SHIFT 5
5263-#define L1_CACHE_BYTES 32
4c928ab7 5264+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5265
5266 #ifdef CONFIG_SPARC32
5267 #define SMP_CACHE_BYTES_SHIFT 5
5268diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5269index 4269ca6..e3da77f 100644
5270--- a/arch/sparc/include/asm/elf_32.h
5271+++ b/arch/sparc/include/asm/elf_32.h
ae4e228f 5272@@ -114,6 +114,13 @@ typedef struct {
5273
5274 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5275
5276+#ifdef CONFIG_PAX_ASLR
5277+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5278+
5279+#define PAX_DELTA_MMAP_LEN 16
5280+#define PAX_DELTA_STACK_LEN 16
5281+#endif
5282+
5283 /* This yields a mask that user programs can use to figure out what
5284 instruction set this cpu supports. This can NOT be done in userspace
5285 on Sparc. */
5286diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5287index 7df8b7f..4946269 100644
5288--- a/arch/sparc/include/asm/elf_64.h
5289+++ b/arch/sparc/include/asm/elf_64.h
15a11c5b 5290@@ -180,6 +180,13 @@ typedef struct {
5291 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5292 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5293
5294+#ifdef CONFIG_PAX_ASLR
5295+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5296+
5297+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5298+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
58c5fc13 5299+#endif
5300+
5301 extern unsigned long sparc64_elf_hwcap;
5302 #define ELF_HWCAP sparc64_elf_hwcap
58c5fc13 5303
5304diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5305index ca2b344..c6084f89 100644
5306--- a/arch/sparc/include/asm/pgalloc_32.h
5307+++ b/arch/sparc/include/asm/pgalloc_32.h
5308@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5309 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5310 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5311 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5312+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
4c928ab7 5313
5314 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5315 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5316diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5317index 40b2d7a..22a665b 100644
5318--- a/arch/sparc/include/asm/pgalloc_64.h
5319+++ b/arch/sparc/include/asm/pgalloc_64.h
5320@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5321 }
4c928ab7 5322
5323 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5324+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5325
5326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5327 {
fe2de317 5328diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4c928ab7 5329index a790cc6..091ed94 100644
5330--- a/arch/sparc/include/asm/pgtable_32.h
5331+++ b/arch/sparc/include/asm/pgtable_32.h
15a11c5b 5332@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5333 BTFIXUPDEF_INT(page_none)
5334 BTFIXUPDEF_INT(page_copy)
5335 BTFIXUPDEF_INT(page_readonly)
5336+
5337+#ifdef CONFIG_PAX_PAGEEXEC
5338+BTFIXUPDEF_INT(page_shared_noexec)
5339+BTFIXUPDEF_INT(page_copy_noexec)
5340+BTFIXUPDEF_INT(page_readonly_noexec)
5341+#endif
5342+
5343 BTFIXUPDEF_INT(page_kernel)
5344
5345 #define PMD_SHIFT SUN4C_PMD_SHIFT
15a11c5b 5346@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5347 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5348 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5349
5350+#ifdef CONFIG_PAX_PAGEEXEC
5351+extern pgprot_t PAGE_SHARED_NOEXEC;
5352+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5353+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5354+#else
5355+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5356+# define PAGE_COPY_NOEXEC PAGE_COPY
5357+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5358+#endif
5359+
5360 extern unsigned long page_kernel;
5361
5362 #ifdef MODULE
5363diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5364index f6ae2b2..b03ffc7 100644
5365--- a/arch/sparc/include/asm/pgtsrmmu.h
5366+++ b/arch/sparc/include/asm/pgtsrmmu.h
5367@@ -115,6 +115,13 @@
5368 SRMMU_EXEC | SRMMU_REF)
5369 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5370 SRMMU_EXEC | SRMMU_REF)
5371+
5372+#ifdef CONFIG_PAX_PAGEEXEC
5373+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5374+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5375+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5376+#endif
5377+
5378 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5379 SRMMU_DIRTY | SRMMU_REF)
5380
5381diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5382index 9689176..63c18ea 100644
5383--- a/arch/sparc/include/asm/spinlock_64.h
5384+++ b/arch/sparc/include/asm/spinlock_64.h
5385@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5386
5387 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5388
5389-static void inline arch_read_lock(arch_rwlock_t *lock)
5390+static inline void arch_read_lock(arch_rwlock_t *lock)
5391 {
5392 unsigned long tmp1, tmp2;
5393
5394 __asm__ __volatile__ (
5395 "1: ldsw [%2], %0\n"
5396 " brlz,pn %0, 2f\n"
5397-"4: add %0, 1, %1\n"
5398+"4: addcc %0, 1, %1\n"
5399+
5400+#ifdef CONFIG_PAX_REFCOUNT
5401+" tvs %%icc, 6\n"
5402+#endif
5403+
5404 " cas [%2], %0, %1\n"
5405 " cmp %0, %1\n"
5406 " bne,pn %%icc, 1b\n"
fe2de317 5407@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5408 " .previous"
5409 : "=&r" (tmp1), "=&r" (tmp2)
5410 : "r" (lock)
5411- : "memory");
5412+ : "memory", "cc");
5413 }
5414
5415-static int inline arch_read_trylock(arch_rwlock_t *lock)
5416+static inline int arch_read_trylock(arch_rwlock_t *lock)
5417 {
5418 int tmp1, tmp2;
5419
fe2de317 5420@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5421 "1: ldsw [%2], %0\n"
5422 " brlz,a,pn %0, 2f\n"
5423 " mov 0, %0\n"
5424-" add %0, 1, %1\n"
5425+" addcc %0, 1, %1\n"
5426+
5427+#ifdef CONFIG_PAX_REFCOUNT
5428+" tvs %%icc, 6\n"
5429+#endif
5430+
5431 " cas [%2], %0, %1\n"
5432 " cmp %0, %1\n"
5433 " bne,pn %%icc, 1b\n"
fe2de317 5434@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5435 return tmp1;
5436 }
5437
5438-static void inline arch_read_unlock(arch_rwlock_t *lock)
5439+static inline void arch_read_unlock(arch_rwlock_t *lock)
5440 {
5441 unsigned long tmp1, tmp2;
5442
5443 __asm__ __volatile__(
5444 "1: lduw [%2], %0\n"
5445-" sub %0, 1, %1\n"
5446+" subcc %0, 1, %1\n"
5447+
5448+#ifdef CONFIG_PAX_REFCOUNT
ae4e228f 5449+" tvs %%icc, 6\n"
5450+#endif
5451+
5452 " cas [%2], %0, %1\n"
5453 " cmp %0, %1\n"
5454 " bne,pn %%xcc, 1b\n"
fe2de317 5455@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5456 : "memory");
5457 }
5458
5459-static void inline arch_write_lock(arch_rwlock_t *lock)
5460+static inline void arch_write_lock(arch_rwlock_t *lock)
5461 {
5462 unsigned long mask, tmp1, tmp2;
5463
fe2de317 5464@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5465 : "memory");
5466 }
5467
5468-static void inline arch_write_unlock(arch_rwlock_t *lock)
5469+static inline void arch_write_unlock(arch_rwlock_t *lock)
5470 {
5471 __asm__ __volatile__(
5472 " stw %%g0, [%0]"
fe2de317 5473@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5474 : "memory");
5475 }
5476
5477-static int inline arch_write_trylock(arch_rwlock_t *lock)
5478+static inline int arch_write_trylock(arch_rwlock_t *lock)
5479 {
5480 unsigned long mask, tmp1, tmp2, result;
5481
fe2de317 5482diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5e856224 5483index c2a1080..21ed218 100644
5484--- a/arch/sparc/include/asm/thread_info_32.h
5485+++ b/arch/sparc/include/asm/thread_info_32.h
5486@@ -50,6 +50,8 @@ struct thread_info {
5487 unsigned long w_saved;
5488
5489 struct restart_block restart_block;
5490+
5491+ unsigned long lowest_stack;
5492 };
5493
5494 /*
fe2de317 5495diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5e856224 5496index 01d057f..13a7d2f 100644
5497--- a/arch/sparc/include/asm/thread_info_64.h
5498+++ b/arch/sparc/include/asm/thread_info_64.h
5499@@ -63,6 +63,8 @@ struct thread_info {
5500 struct pt_regs *kern_una_regs;
5501 unsigned int kern_una_insn;
5502
5503+ unsigned long lowest_stack;
5504+
5505 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5506 };
5507
5508@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5509 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5510 /* flag bit 6 is available */
5511 #define TIF_32BIT 7 /* 32-bit binary */
5512-/* flag bit 8 is available */
5513+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5514 #define TIF_SECCOMP 9 /* secure computing */
5515 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5516 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5517+
5518 /* NOTE: Thread flags >= 12 should be ones we have no interest
5519 * in using in assembly, else we can't use the mask as
5520 * an immediate value in instructions such as andcc.
5521@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5522 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5523 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5524 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5525+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5526
5527 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5528 _TIF_DO_NOTIFY_RESUME_MASK | \
5529 _TIF_NEED_RESCHED)
5530 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5531
5532+#define _TIF_WORK_SYSCALL \
5533+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5534+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5535+
5536+
5537 /*
5538 * Thread-synchronous status.
5539 *
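TIF_GRSEC_SETXID takes over the previously unused flag bit 8 so that delayed credential updates are noticed on syscall entry/exit, and _TIF_WORK_SYSCALL folds every flag that forces the slow syscall path into one mask — the syscalls.S hunks further down test that single constant instead of or-ing four flags in assembly. Sketch of the mask test (TIF_SYSCALL_TRACE's bit is not shown in this hunk and is assumed to be 0 here):

#include <stdio.h>

#define _TIF_SYSCALL_TRACE	(1u << 0)	/* assumed bit number */
#define _TIF_GRSEC_SETXID	(1u << 8)
#define _TIF_SECCOMP		(1u << 9)
#define _TIF_SYSCALL_AUDIT	(1u << 10)
#define _TIF_SYSCALL_TRACEPOINT	(1u << 11)

#define _TIF_WORK_SYSCALL \
	(_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
	 _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)

int main(void)
{
	unsigned int flags = _TIF_GRSEC_SETXID;	/* set by the setxid code */

	printf("take slow path: %d\n", (flags & _TIF_WORK_SYSCALL) != 0);
	return 0;
}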
5540diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5541index e88fbe5..96b0ce5 100644
5542--- a/arch/sparc/include/asm/uaccess.h
5543+++ b/arch/sparc/include/asm/uaccess.h
5544@@ -1,5 +1,13 @@
5545 #ifndef ___ASM_SPARC_UACCESS_H
5546 #define ___ASM_SPARC_UACCESS_H
5547+
5548+#ifdef __KERNEL__
5549+#ifndef __ASSEMBLY__
5550+#include <linux/types.h>
5551+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5552+#endif
5553+#endif
5554+
5555 #if defined(__sparc__) && defined(__arch64__)
5556 #include <asm/uaccess_64.h>
5557 #else
5558diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5559index 8303ac4..07f333d 100644
5560--- a/arch/sparc/include/asm/uaccess_32.h
5561+++ b/arch/sparc/include/asm/uaccess_32.h
5562@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5563
5564 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5565 {
5566- if (n && __access_ok((unsigned long) to, n))
5567+ if ((long)n < 0)
5568+ return n;
5569+
5570+ if (n && __access_ok((unsigned long) to, n)) {
5571+ if (!__builtin_constant_p(n))
5572+ check_object_size(from, n, true);
5573 return __copy_user(to, (__force void __user *) from, n);
5574- else
5575+ } else
5576 return n;
5577 }
5578
5579 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5580 {
5581+ if ((long)n < 0)
5582+ return n;
5583+
5584+ if (!__builtin_constant_p(n))
5585+ check_object_size(from, n, true);
5586+
5587 return __copy_user(to, (__force void __user *) from, n);
5588 }
5589
6892158b 5590 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
58c5fc13 5591 {
6892158b 5592- if (n && __access_ok((unsigned long) from, n))
5593+ if ((long)n < 0)
5594+ return n;
5595+
5596+ if (n && __access_ok((unsigned long) from, n)) {
5597+ if (!__builtin_constant_p(n))
5598+ check_object_size(to, n, false);
5599 return __copy_user((__force void __user *) to, from, n);
5600- else
5601+ } else
5602 return n;
5603 }
5604
5605 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5606 {
5607+ if ((long)n < 0)
5608+ return n;
5609+
5610 return __copy_user((__force void __user *) to, from, n);
5611 }
5612
5613diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5614index 3e1449f..5293a0e 100644
5615--- a/arch/sparc/include/asm/uaccess_64.h
5616+++ b/arch/sparc/include/asm/uaccess_64.h
5617@@ -10,6 +10,7 @@
5618 #include <linux/compiler.h>
5619 #include <linux/string.h>
5620 #include <linux/thread_info.h>
5621+#include <linux/kernel.h>
5622 #include <asm/asi.h>
5623 #include <asm/system.h>
5624 #include <asm/spitfire.h>
fe2de317 5625@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5626 static inline unsigned long __must_check
5627 copy_from_user(void *to, const void __user *from, unsigned long size)
5628 {
5629- unsigned long ret = ___copy_from_user(to, from, size);
5630+ unsigned long ret;
5631
5632+ if ((long)size < 0 || size > INT_MAX)
5633+ return size;
5634+
5635+ if (!__builtin_constant_p(size))
5636+ check_object_size(to, size, false);
5637+
5638+ ret = ___copy_from_user(to, from, size);
5639 if (unlikely(ret))
5640 ret = copy_from_user_fixup(to, from, size);
5641
fe2de317 5642@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5643 static inline unsigned long __must_check
5644 copy_to_user(void __user *to, const void *from, unsigned long size)
5645 {
5646- unsigned long ret = ___copy_to_user(to, from, size);
5647+ unsigned long ret;
fe2de317 5648
ae4e228f 5649+ if ((long)size < 0 || size > INT_MAX)
5650+ return size;
5651+
5652+ if (!__builtin_constant_p(size))
5653+ check_object_size(from, size, true);
fe2de317 5654+
ae4e228f 5655+ ret = ___copy_to_user(to, from, size);
5656 if (unlikely(ret))
5657 ret = copy_to_user_fixup(to, from, size);
ae4e228f 5658 return ret;
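As in uaccess_32.h, the 64-bit copy routines now reject sign-bit or larger-than-INT_MAX lengths and, when the size is not a compile-time constant, call check_object_size(), the PAX_USERCOPY hook declared in uaccess.h above; its job is to verify that the kernel-side buffer really contains the span being copied. A much simplified user-space analogue of that bounds test (hypothetical helper, not the kernel implementation):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Does [ptr, ptr + n) stay inside the object [obj, obj + size)? */
static bool span_inside_object(const char *obj, size_t size,
			       const char *ptr, size_t n)
{
	size_t off = (size_t)(ptr - obj);

	return ptr >= obj && off <= size && n <= size - off;
}

int main(void)
{
	char buf[64];

	printf("%d\n", span_inside_object(buf, sizeof buf, buf + 16, 32));	/* 1 */
	printf("%d\n", span_inside_object(buf, sizeof buf, buf + 16, 64));	/* 0 */
	return 0;
}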
5659diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5660index cb85458..e063f17 100644
5661--- a/arch/sparc/kernel/Makefile
5662+++ b/arch/sparc/kernel/Makefile
5663@@ -3,7 +3,7 @@
5664 #
5665
5666 asflags-y := -ansi
5667-ccflags-y := -Werror
5668+#ccflags-y := -Werror
5669
5670 extra-y := head_$(BITS).o
5671 extra-y += init_task.o
5672diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5673index f793742..4d880af 100644
5674--- a/arch/sparc/kernel/process_32.c
5675+++ b/arch/sparc/kernel/process_32.c
15a11c5b 5676@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5677 rw->ins[4], rw->ins[5],
5678 rw->ins[6],
5679 rw->ins[7]);
5680- printk("%pS\n", (void *) rw->ins[7]);
5681+ printk("%pA\n", (void *) rw->ins[7]);
5682 rw = (struct reg_window32 *) rw->ins[6];
5683 }
5684 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
15a11c5b 5685@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5686
5687 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5688 r->psr, r->pc, r->npc, r->y, print_tainted());
5689- printk("PC: <%pS>\n", (void *) r->pc);
5690+ printk("PC: <%pA>\n", (void *) r->pc);
5691 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5692 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5693 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5694 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5695 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5696 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5697- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5698+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5699
5700 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5701 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
fe2de317 5702@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5703 rw = (struct reg_window32 *) fp;
5704 pc = rw->ins[7];
5705 printk("[%08lx : ", pc);
5706- printk("%pS ] ", (void *) pc);
5707+ printk("%pA ] ", (void *) pc);
5708 fp = rw->ins[6];
5709 } while (++count < 16);
5710 printk("\n");
fe2de317 5711diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5e856224 5712index 39d8b05..d1a7d90 100644
5713--- a/arch/sparc/kernel/process_64.c
5714+++ b/arch/sparc/kernel/process_64.c
5e856224 5715@@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5716 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5717 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5718 if (regs->tstate & TSTATE_PRIV)
5719- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5720+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5721 }
5722
5723 void show_regs(struct pt_regs *regs)
5724 {
5725 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5726 regs->tpc, regs->tnpc, regs->y, print_tainted());
5727- printk("TPC: <%pS>\n", (void *) regs->tpc);
5728+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5729 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5730 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5731 regs->u_regs[3]);
5e856224 5732@@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5733 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5734 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5735 regs->u_regs[15]);
5736- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5737+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5738 show_regwindow(regs);
5739 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5740 }
5e856224 5741@@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5742 ((tp && tp->task) ? tp->task->pid : -1));
5743
5744 if (gp->tstate & TSTATE_PRIV) {
5745- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5746+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5747 (void *) gp->tpc,
5748 (void *) gp->o7,
5749 (void *) gp->i7,
5750diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5751index 9388844..0075fd2 100644
5752--- a/arch/sparc/kernel/ptrace_64.c
5753+++ b/arch/sparc/kernel/ptrace_64.c
5754@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
5755 return ret;
5756 }
5757
5758+#ifdef CONFIG_GRKERNSEC_SETXID
5759+extern void gr_delayed_cred_worker(void);
5760+#endif
5761+
5762 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5763 {
5764 int ret = 0;
5765@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5766 /* do the secure computing check first */
5767 secure_computing(regs->u_regs[UREG_G1]);
5768
5769+#ifdef CONFIG_GRKERNSEC_SETXID
5770+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5771+ gr_delayed_cred_worker();
5772+#endif
5773+
5774 if (test_thread_flag(TIF_SYSCALL_TRACE))
5775 ret = tracehook_report_syscall_entry(regs);
5776
5777@@ -1085,6 +1094,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5778
5779 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5780 {
5781+#ifdef CONFIG_GRKERNSEC_SETXID
5782+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5783+ gr_delayed_cred_worker();
5784+#endif
5785+
5786 audit_syscall_exit(regs);
5787
5788 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5789diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5790index 42b282f..28ce9f2 100644
5791--- a/arch/sparc/kernel/sys_sparc_32.c
5792+++ b/arch/sparc/kernel/sys_sparc_32.c
5793@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5794 if (ARCH_SUN4C && len > 0x20000000)
5795 return -ENOMEM;
5796 if (!addr)
5797- addr = TASK_UNMAPPED_BASE;
5798+ addr = current->mm->mmap_base;
5799
5800 if (flags & MAP_SHARED)
5801 addr = COLOUR_ALIGN(addr);
fe2de317 5802@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5803 }
5804 if (TASK_SIZE - PAGE_SIZE - len < addr)
5805 return -ENOMEM;
5806- if (!vmm || addr + len <= vmm->vm_start)
5807+ if (check_heap_stack_gap(vmm, addr, len))
5808 return addr;
5809 addr = vmm->vm_end;
5810 if (flags & MAP_SHARED)
fe2de317 5811diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5e856224 5812index 232df99..cee1f9c 100644
5813--- a/arch/sparc/kernel/sys_sparc_64.c
5814+++ b/arch/sparc/kernel/sys_sparc_64.c
5815@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5816 /* We do not accept a shared mapping if it would violate
5817 * cache aliasing constraints.
5818 */
5819- if ((flags & MAP_SHARED) &&
5820+ if ((filp || (flags & MAP_SHARED)) &&
5821 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5822 return -EINVAL;
5823 return addr;
fe2de317 5824@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5825 if (filp || (flags & MAP_SHARED))
5826 do_color_align = 1;
5827
5828+#ifdef CONFIG_PAX_RANDMMAP
5829+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5830+#endif
5831+
5832 if (addr) {
5833 if (do_color_align)
5834 addr = COLOUR_ALIGN(addr, pgoff);
fe2de317 5835@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5836 addr = PAGE_ALIGN(addr);
5837
5838 vma = find_vma(mm, addr);
5839- if (task_size - len >= addr &&
5840- (!vma || addr + len <= vma->vm_start))
5841+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5842 return addr;
5843 }
5844
5845 if (len > mm->cached_hole_size) {
5846- start_addr = addr = mm->free_area_cache;
5847+ start_addr = addr = mm->free_area_cache;
5848 } else {
5849- start_addr = addr = TASK_UNMAPPED_BASE;
5850+ start_addr = addr = mm->mmap_base;
5851 mm->cached_hole_size = 0;
5852 }
5853
57199397 5854@@ -174,14 +177,14 @@ full_search:
5855 vma = find_vma(mm, VA_EXCLUDE_END);
5856 }
5857 if (unlikely(task_size < addr)) {
5858- if (start_addr != TASK_UNMAPPED_BASE) {
5859- start_addr = addr = TASK_UNMAPPED_BASE;
5860+ if (start_addr != mm->mmap_base) {
5861+ start_addr = addr = mm->mmap_base;
5862 mm->cached_hole_size = 0;
5863 goto full_search;
5864 }
5865 return -ENOMEM;
5866 }
5867- if (likely(!vma || addr + len <= vma->vm_start)) {
5868+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5869 /*
5870 * Remember the place where we stopped the search:
5871 */
fe2de317 5872@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5873 /* We do not accept a shared mapping if it would violate
5874 * cache aliasing constraints.
5875 */
5876- if ((flags & MAP_SHARED) &&
5877+ if ((filp || (flags & MAP_SHARED)) &&
5878 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5879 return -EINVAL;
5880 return addr;
fe2de317 5881@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5882 addr = PAGE_ALIGN(addr);
5883
5884 vma = find_vma(mm, addr);
5885- if (task_size - len >= addr &&
5886- (!vma || addr + len <= vma->vm_start))
5887+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5888 return addr;
5889 }
5890
fe2de317 5891@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5892 /* make sure it can fit in the remaining address space */
5893 if (likely(addr > len)) {
5894 vma = find_vma(mm, addr-len);
5895- if (!vma || addr <= vma->vm_start) {
5896+ if (check_heap_stack_gap(vma, addr - len, len)) {
5897 /* remember the address as a hint for next time */
5898 return (mm->free_area_cache = addr-len);
5899 }
fe2de317 5900@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5901 if (unlikely(mm->mmap_base < len))
5902 goto bottomup;
5903
5904- addr = mm->mmap_base-len;
5905- if (do_color_align)
5906- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5907+ addr = mm->mmap_base - len;
5908
5909 do {
5910+ if (do_color_align)
5911+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5912 /*
5913 * Lookup failure means no vma is above this address,
5914 * else if new region fits below vma->vm_start,
5915 * return with success:
5916 */
5917 vma = find_vma(mm, addr);
5918- if (likely(!vma || addr+len <= vma->vm_start)) {
5919+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5920 /* remember the address as a hint for next time */
5921 return (mm->free_area_cache = addr);
5922 }
fe2de317 5923@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5924 mm->cached_hole_size = vma->vm_start - addr;
5925
5926 /* try just below the current vma->vm_start */
5927- addr = vma->vm_start-len;
5928- if (do_color_align)
5929- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5930- } while (likely(len < vma->vm_start));
5931+ addr = skip_heap_stack_gap(vma, len);
5932+ } while (!IS_ERR_VALUE(addr));
5933
5934 bottomup:
5935 /*
fe2de317 5936@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
ae4e228f 5937 gap == RLIM_INFINITY ||
5938 sysctl_legacy_va_layout) {
5939 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5940+
5941+#ifdef CONFIG_PAX_RANDMMAP
5942+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5943+ mm->mmap_base += mm->delta_mmap;
5944+#endif
5945+
5946 mm->get_unmapped_area = arch_get_unmapped_area;
5947 mm->unmap_area = arch_unmap_area;
5948 } else {
fe2de317 5949@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5950 gap = (task_size / 6 * 5);
5951
5952 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5953+
5954+#ifdef CONFIG_PAX_RANDMMAP
5955+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5956+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5957+#endif
5958+
5959 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5960 mm->unmap_area = arch_unmap_area_topdown;
5961 }
5962diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5963index 1d7e274..b39c527 100644
5964--- a/arch/sparc/kernel/syscalls.S
5965+++ b/arch/sparc/kernel/syscalls.S
5966@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5967 #endif
5968 .align 32
5969 1: ldx [%g6 + TI_FLAGS], %l5
5970- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5971+ andcc %l5, _TIF_WORK_SYSCALL, %g0
5972 be,pt %icc, rtrap
5973 nop
5974 call syscall_trace_leave
5975@@ -179,7 +179,7 @@ linux_sparc_syscall32:
5976
5977 srl %i5, 0, %o5 ! IEU1
5978 srl %i2, 0, %o2 ! IEU0 Group
5979- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5980+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5981 bne,pn %icc, linux_syscall_trace32 ! CTI
5982 mov %i0, %l5 ! IEU1
5983 call %l7 ! CTI Group brk forced
5984@@ -202,7 +202,7 @@ linux_sparc_syscall:
5985
5986 mov %i3, %o3 ! IEU1
5987 mov %i4, %o4 ! IEU0 Group
5988- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5989+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5990 bne,pn %icc, linux_syscall_trace ! CTI Group
5991 mov %i0, %l5 ! IEU0
5992 2: call %l7 ! CTI Group brk forced
5993@@ -226,7 +226,7 @@ ret_sys_call:
5994
5995 cmp %o0, -ERESTART_RESTARTBLOCK
5996 bgeu,pn %xcc, 1f
5997- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5998+ andcc %l0, _TIF_WORK_SYSCALL, %l6
5999 80:
6000 /* System call success, clear Carry condition code. */
6001 andn %g3, %g2, %g3
6002@@ -241,7 +241,7 @@ ret_sys_call:
6003 /* System call failure, set Carry condition code.
6004 * Also, get abs(errno) to return to the process.
6005 */
6006- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6007+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6008 sub %g0, %o0, %o0
6009 or %g3, %g2, %g3
6010 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
fe2de317 6011diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4c928ab7 6012index 591f20c..0f1b925 100644
6013--- a/arch/sparc/kernel/traps_32.c
6014+++ b/arch/sparc/kernel/traps_32.c
4c928ab7 6015@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
6016 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6017 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6018
6019+extern void gr_handle_kernel_exploit(void);
6020+
6021 void die_if_kernel(char *str, struct pt_regs *regs)
6022 {
6023 static int die_counter;
4c928ab7 6024@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6025 count++ < 30 &&
6026 (((unsigned long) rw) >= PAGE_OFFSET) &&
6027 !(((unsigned long) rw) & 0x7)) {
6028- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6029+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6030 (void *) rw->ins[7]);
6031 rw = (struct reg_window32 *)rw->ins[6];
6032 }
6033 }
6034 printk("Instruction DUMP:");
6035 instruction_dump ((unsigned long *) regs->pc);
6036- if(regs->psr & PSR_PS)
6037+ if(regs->psr & PSR_PS) {
6038+ gr_handle_kernel_exploit();
6039 do_exit(SIGKILL);
6040+ }
6041 do_exit(SIGSEGV);
6042 }
6043
6044diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6045index 0cbdaa4..438e4c9 100644
6046--- a/arch/sparc/kernel/traps_64.c
6047+++ b/arch/sparc/kernel/traps_64.c
6048@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6049 i + 1,
6050 p->trapstack[i].tstate, p->trapstack[i].tpc,
6051 p->trapstack[i].tnpc, p->trapstack[i].tt);
6052- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6053+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6054 }
6055 }
6056
fe2de317 6057@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6058
6059 lvl -= 0x100;
6060 if (regs->tstate & TSTATE_PRIV) {
6061+
6062+#ifdef CONFIG_PAX_REFCOUNT
6063+ if (lvl == 6)
6064+ pax_report_refcount_overflow(regs);
6065+#endif
6066+
6067 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6068 die_if_kernel(buffer, regs);
6069 }
fe2de317 6070@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6071 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6072 {
6073 char buffer[32];
6074-
6075+
6076 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6077 0, lvl, SIGTRAP) == NOTIFY_STOP)
6078 return;
6079
6080+#ifdef CONFIG_PAX_REFCOUNT
6081+ if (lvl == 6)
6082+ pax_report_refcount_overflow(regs);
6083+#endif
6084+
6085 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6086
6087 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
fe2de317 6088@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6089 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6090 printk("%s" "ERROR(%d): ",
6091 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6092- printk("TPC<%pS>\n", (void *) regs->tpc);
6093+ printk("TPC<%pA>\n", (void *) regs->tpc);
6094 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6095 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6096 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
fe2de317 6097@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6098 smp_processor_id(),
6099 (type & 0x1) ? 'I' : 'D',
6100 regs->tpc);
6101- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6102+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6103 panic("Irrecoverable Cheetah+ parity error.");
6104 }
6105
fe2de317 6106@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6107 smp_processor_id(),
6108 (type & 0x1) ? 'I' : 'D',
6109 regs->tpc);
6110- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6111+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6112 }
6113
6114 struct sun4v_error_entry {
fe2de317 6115@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6116
6117 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6118 regs->tpc, tl);
6119- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6120+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6121 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6122- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6123+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6124 (void *) regs->u_regs[UREG_I7]);
6125 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6126 "pte[%lx] error[%lx]\n",
fe2de317 6127@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6128
6129 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6130 regs->tpc, tl);
6131- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6132+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6133 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6134- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6135+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6136 (void *) regs->u_regs[UREG_I7]);
6137 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6138 "pte[%lx] error[%lx]\n",
fe2de317 6139@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6140 fp = (unsigned long)sf->fp + STACK_BIAS;
6141 }
6142
6143- printk(" [%016lx] %pS\n", pc, (void *) pc);
6144+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6145 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6146 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6147 int index = tsk->curr_ret_stack;
6148 if (tsk->ret_stack && index >= graph) {
6149 pc = tsk->ret_stack[index - graph].ret;
6150- printk(" [%016lx] %pS\n", pc, (void *) pc);
6151+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6152 graph++;
6153 }
6154 }
fe2de317 6155@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6156 return (struct reg_window *) (fp + STACK_BIAS);
6157 }
6158
6159+extern void gr_handle_kernel_exploit(void);
6160+
6161 void die_if_kernel(char *str, struct pt_regs *regs)
6162 {
6163 static int die_counter;
fe2de317 6164@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6165 while (rw &&
6166 count++ < 30 &&
6167 kstack_valid(tp, (unsigned long) rw)) {
6168- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6169+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6170 (void *) rw->ins[7]);
6171
6172 rw = kernel_stack_up(rw);
fe2de317 6173@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6174 }
6175 user_instruction_dump ((unsigned int __user *) regs->tpc);
6176 }
6177- if (regs->tstate & TSTATE_PRIV)
6178+ if (regs->tstate & TSTATE_PRIV) {
6179+ gr_handle_kernel_exploit();
6180 do_exit(SIGKILL);
6181+ }
6182 do_exit(SIGSEGV);
6183 }
6184 EXPORT_SYMBOL(die_if_kernel);
6185diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6186index 76e4ac1..78f8bb1 100644
6187--- a/arch/sparc/kernel/unaligned_64.c
6188+++ b/arch/sparc/kernel/unaligned_64.c
6189@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6190 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6191
6192 if (__ratelimit(&ratelimit)) {
6193- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6194+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6195 regs->tpc, (void *) regs->tpc);
6196 }
6197 }
6198diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6199index a3fc437..fea9957 100644
6200--- a/arch/sparc/lib/Makefile
6201+++ b/arch/sparc/lib/Makefile
6202@@ -2,7 +2,7 @@
6203 #
6204
6205 asflags-y := -ansi -DST_DIV0=0x02
6206-ccflags-y := -Werror
6207+#ccflags-y := -Werror
6208
6209 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6210 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6211diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6212index 59186e0..f747d7a 100644
6213--- a/arch/sparc/lib/atomic_64.S
6214+++ b/arch/sparc/lib/atomic_64.S
6215@@ -18,7 +18,12 @@
6216 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6217 BACKOFF_SETUP(%o2)
6218 1: lduw [%o1], %g1
6219- add %g1, %o0, %g7
6220+ addcc %g1, %o0, %g7
6221+
6222+#ifdef CONFIG_PAX_REFCOUNT
6223+ tvs %icc, 6
6224+#endif
6225+
6226 cas [%o1], %g1, %g7
6227 cmp %g1, %g7
6228 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6229@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6230 2: BACKOFF_SPIN(%o2, %o3, 1b)
6231 .size atomic_add, .-atomic_add
6232
6233+ .globl atomic_add_unchecked
6234+ .type atomic_add_unchecked,#function
6235+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6236+ BACKOFF_SETUP(%o2)
6237+1: lduw [%o1], %g1
6238+ add %g1, %o0, %g7
6239+ cas [%o1], %g1, %g7
6240+ cmp %g1, %g7
6241+ bne,pn %icc, 2f
6242+ nop
6243+ retl
6244+ nop
6245+2: BACKOFF_SPIN(%o2, %o3, 1b)
6246+ .size atomic_add_unchecked, .-atomic_add_unchecked
6247+
6248 .globl atomic_sub
6249 .type atomic_sub,#function
6250 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6251 BACKOFF_SETUP(%o2)
6252 1: lduw [%o1], %g1
6253- sub %g1, %o0, %g7
6254+ subcc %g1, %o0, %g7
6255+
6256+#ifdef CONFIG_PAX_REFCOUNT
6257+ tvs %icc, 6
6258+#endif
6259+
6260 cas [%o1], %g1, %g7
6261 cmp %g1, %g7
6262 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6263@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6264 2: BACKOFF_SPIN(%o2, %o3, 1b)
6265 .size atomic_sub, .-atomic_sub
6266
6267+ .globl atomic_sub_unchecked
6268+ .type atomic_sub_unchecked,#function
6269+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6270+ BACKOFF_SETUP(%o2)
6271+1: lduw [%o1], %g1
6272+ sub %g1, %o0, %g7
6273+ cas [%o1], %g1, %g7
6274+ cmp %g1, %g7
6275+ bne,pn %icc, 2f
6276+ nop
6277+ retl
6278+ nop
6279+2: BACKOFF_SPIN(%o2, %o3, 1b)
6280+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6281+
6282 .globl atomic_add_ret
6283 .type atomic_add_ret,#function
6284 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6285 BACKOFF_SETUP(%o2)
6286 1: lduw [%o1], %g1
6287- add %g1, %o0, %g7
6288+ addcc %g1, %o0, %g7
6289+
6290+#ifdef CONFIG_PAX_REFCOUNT
6291+ tvs %icc, 6
6292+#endif
6293+
6294 cas [%o1], %g1, %g7
6295 cmp %g1, %g7
6296 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6297@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6298 2: BACKOFF_SPIN(%o2, %o3, 1b)
6299 .size atomic_add_ret, .-atomic_add_ret
6300
6301+ .globl atomic_add_ret_unchecked
6302+ .type atomic_add_ret_unchecked,#function
6303+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6304+ BACKOFF_SETUP(%o2)
6305+1: lduw [%o1], %g1
6306+ addcc %g1, %o0, %g7
6307+ cas [%o1], %g1, %g7
6308+ cmp %g1, %g7
6309+ bne,pn %icc, 2f
6310+ add %g7, %o0, %g7
6311+ sra %g7, 0, %o0
6312+ retl
6313+ nop
6314+2: BACKOFF_SPIN(%o2, %o3, 1b)
6315+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6316+
6317 .globl atomic_sub_ret
6318 .type atomic_sub_ret,#function
6319 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6320 BACKOFF_SETUP(%o2)
6321 1: lduw [%o1], %g1
6322- sub %g1, %o0, %g7
6323+ subcc %g1, %o0, %g7
6324+
6325+#ifdef CONFIG_PAX_REFCOUNT
6326+ tvs %icc, 6
6327+#endif
6328+
6329 cas [%o1], %g1, %g7
6330 cmp %g1, %g7
6331 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6332@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6333 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6334 BACKOFF_SETUP(%o2)
6335 1: ldx [%o1], %g1
6336- add %g1, %o0, %g7
6337+ addcc %g1, %o0, %g7
6338+
6339+#ifdef CONFIG_PAX_REFCOUNT
6340+ tvs %xcc, 6
6341+#endif
6342+
6343 casx [%o1], %g1, %g7
6344 cmp %g1, %g7
6345 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6346@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6347 2: BACKOFF_SPIN(%o2, %o3, 1b)
6348 .size atomic64_add, .-atomic64_add
6349
6350+ .globl atomic64_add_unchecked
6351+ .type atomic64_add_unchecked,#function
6352+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6353+ BACKOFF_SETUP(%o2)
6354+1: ldx [%o1], %g1
6355+ addcc %g1, %o0, %g7
6356+ casx [%o1], %g1, %g7
6357+ cmp %g1, %g7
6358+ bne,pn %xcc, 2f
6359+ nop
6360+ retl
6361+ nop
6362+2: BACKOFF_SPIN(%o2, %o3, 1b)
6363+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6364+
6365 .globl atomic64_sub
6366 .type atomic64_sub,#function
6367 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6368 BACKOFF_SETUP(%o2)
6369 1: ldx [%o1], %g1
6370- sub %g1, %o0, %g7
6371+ subcc %g1, %o0, %g7
6372+
6373+#ifdef CONFIG_PAX_REFCOUNT
6374+ tvs %xcc, 6
6375+#endif
6376+
6377 casx [%o1], %g1, %g7
6378 cmp %g1, %g7
6379 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6380@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6381 2: BACKOFF_SPIN(%o2, %o3, 1b)
6382 .size atomic64_sub, .-atomic64_sub
6383
6384+ .globl atomic64_sub_unchecked
6385+ .type atomic64_sub_unchecked,#function
6386+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6387+ BACKOFF_SETUP(%o2)
6388+1: ldx [%o1], %g1
6389+ subcc %g1, %o0, %g7
6390+ casx [%o1], %g1, %g7
6391+ cmp %g1, %g7
6392+ bne,pn %xcc, 2f
6393+ nop
6394+ retl
6395+ nop
6396+2: BACKOFF_SPIN(%o2, %o3, 1b)
6397+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6398+
6399 .globl atomic64_add_ret
6400 .type atomic64_add_ret,#function
6401 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6402 BACKOFF_SETUP(%o2)
6403 1: ldx [%o1], %g1
6404- add %g1, %o0, %g7
6405+ addcc %g1, %o0, %g7
6406+
6407+#ifdef CONFIG_PAX_REFCOUNT
6408+ tvs %xcc, 6
6409+#endif
6410+
6411 casx [%o1], %g1, %g7
6412 cmp %g1, %g7
6413 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6414@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6415 2: BACKOFF_SPIN(%o2, %o3, 1b)
6416 .size atomic64_add_ret, .-atomic64_add_ret
6417
6418+ .globl atomic64_add_ret_unchecked
6419+ .type atomic64_add_ret_unchecked,#function
6420+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6421+ BACKOFF_SETUP(%o2)
6422+1: ldx [%o1], %g1
6423+ addcc %g1, %o0, %g7
6424+ casx [%o1], %g1, %g7
6425+ cmp %g1, %g7
6426+ bne,pn %xcc, 2f
6427+ add %g7, %o0, %g7
6428+ mov %g7, %o0
6429+ retl
6430+ nop
6431+2: BACKOFF_SPIN(%o2, %o3, 1b)
6432+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6433+
6434 .globl atomic64_sub_ret
6435 .type atomic64_sub_ret,#function
6436 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6437 BACKOFF_SETUP(%o2)
6438 1: ldx [%o1], %g1
6439- sub %g1, %o0, %g7
6440+ subcc %g1, %o0, %g7
6441+
6442+#ifdef CONFIG_PAX_REFCOUNT
6443+ tvs %xcc, 6
6444+#endif
6445+
6446 casx [%o1], %g1, %g7
6447 cmp %g1, %g7
6448 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6449diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6450index f73c224..662af10 100644
6451--- a/arch/sparc/lib/ksyms.c
6452+++ b/arch/sparc/lib/ksyms.c
6453@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6454
6455 /* Atomic counter implementation. */
6456 EXPORT_SYMBOL(atomic_add);
6457+EXPORT_SYMBOL(atomic_add_unchecked);
6458 EXPORT_SYMBOL(atomic_add_ret);
6459+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6460 EXPORT_SYMBOL(atomic_sub);
6461+EXPORT_SYMBOL(atomic_sub_unchecked);
6462 EXPORT_SYMBOL(atomic_sub_ret);
6463 EXPORT_SYMBOL(atomic64_add);
6464+EXPORT_SYMBOL(atomic64_add_unchecked);
6465 EXPORT_SYMBOL(atomic64_add_ret);
6466+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6467 EXPORT_SYMBOL(atomic64_sub);
6468+EXPORT_SYMBOL(atomic64_sub_unchecked);
6469 EXPORT_SYMBOL(atomic64_sub_ret);
6470
6471 /* Atomic bit operations. */
6472diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6473index 301421c..e2535d1 100644
6474--- a/arch/sparc/mm/Makefile
6475+++ b/arch/sparc/mm/Makefile
6476@@ -2,7 +2,7 @@
6477 #
6478
6479 asflags-y := -ansi
6480-ccflags-y := -Werror
6481+#ccflags-y := -Werror
6482
6483 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6484 obj-y += fault_$(BITS).o
6485diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6486index 8023fd7..c8e89e9 100644
6487--- a/arch/sparc/mm/fault_32.c
6488+++ b/arch/sparc/mm/fault_32.c
6489@@ -21,6 +21,9 @@
6490 #include <linux/perf_event.h>
6491 #include <linux/interrupt.h>
6492 #include <linux/kdebug.h>
6493+#include <linux/slab.h>
6494+#include <linux/pagemap.h>
6495+#include <linux/compiler.h>
6496
6497 #include <asm/system.h>
6498 #include <asm/page.h>
6499@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6500 return safe_compute_effective_address(regs, insn);
6501 }
6502
6503+#ifdef CONFIG_PAX_PAGEEXEC
6504+#ifdef CONFIG_PAX_DLRESOLVE
6505+static void pax_emuplt_close(struct vm_area_struct *vma)
6506+{
6507+ vma->vm_mm->call_dl_resolve = 0UL;
6508+}
6509+
6510+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6511+{
6512+ unsigned int *kaddr;
6513+
6514+ vmf->page = alloc_page(GFP_HIGHUSER);
6515+ if (!vmf->page)
6516+ return VM_FAULT_OOM;
6517+
6518+ kaddr = kmap(vmf->page);
6519+ memset(kaddr, 0, PAGE_SIZE);
6520+ kaddr[0] = 0x9DE3BFA8U; /* save */
6521+ flush_dcache_page(vmf->page);
6522+ kunmap(vmf->page);
6523+ return VM_FAULT_MAJOR;
6524+}
6525+
6526+static const struct vm_operations_struct pax_vm_ops = {
6527+ .close = pax_emuplt_close,
6528+ .fault = pax_emuplt_fault
6529+};
6530+
6531+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6532+{
6533+ int ret;
6534+
6535+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6536+ vma->vm_mm = current->mm;
6537+ vma->vm_start = addr;
6538+ vma->vm_end = addr + PAGE_SIZE;
6539+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6540+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6541+ vma->vm_ops = &pax_vm_ops;
6542+
6543+ ret = insert_vm_struct(current->mm, vma);
6544+ if (ret)
6545+ return ret;
6546+
6547+ ++current->mm->total_vm;
6548+ return 0;
6549+}
6550+#endif
6551+
6552+/*
6553+ * PaX: decide what to do with offenders (regs->pc = fault address)
6554+ *
6555+ * returns 1 when task should be killed
6556+ * 2 when patched PLT trampoline was detected
6557+ * 3 when unpatched PLT trampoline was detected
6558+ */
6559+static int pax_handle_fetch_fault(struct pt_regs *regs)
6560+{
6561+
6562+#ifdef CONFIG_PAX_EMUPLT
6563+ int err;
6564+
6565+ do { /* PaX: patched PLT emulation #1 */
6566+ unsigned int sethi1, sethi2, jmpl;
6567+
6568+ err = get_user(sethi1, (unsigned int *)regs->pc);
6569+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6570+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6571+
6572+ if (err)
6573+ break;
6574+
6575+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6576+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6577+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6578+ {
6579+ unsigned int addr;
6580+
6581+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6582+ addr = regs->u_regs[UREG_G1];
6583+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6584+ regs->pc = addr;
6585+ regs->npc = addr+4;
6586+ return 2;
6587+ }
6588+ } while (0);
6589+
6590+ { /* PaX: patched PLT emulation #2 */
6591+ unsigned int ba;
6592+
6593+ err = get_user(ba, (unsigned int *)regs->pc);
6594+
6595+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6596+ unsigned int addr;
6597+
6598+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6599+ regs->pc = addr;
6600+ regs->npc = addr+4;
6601+ return 2;
6602+ }
6603+ }
6604+
6605+ do { /* PaX: patched PLT emulation #3 */
6606+ unsigned int sethi, jmpl, nop;
6607+
6608+ err = get_user(sethi, (unsigned int *)regs->pc);
6609+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6610+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6611+
6612+ if (err)
6613+ break;
6614+
6615+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6616+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6617+ nop == 0x01000000U)
6618+ {
6619+ unsigned int addr;
6620+
6621+ addr = (sethi & 0x003FFFFFU) << 10;
6622+ regs->u_regs[UREG_G1] = addr;
6623+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6624+ regs->pc = addr;
6625+ regs->npc = addr+4;
6626+ return 2;
6627+ }
6628+ } while (0);
6629+
6630+ do { /* PaX: unpatched PLT emulation step 1 */
6631+ unsigned int sethi, ba, nop;
6632+
6633+ err = get_user(sethi, (unsigned int *)regs->pc);
6634+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6635+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6636+
6637+ if (err)
6638+ break;
6639+
6640+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6641+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6642+ nop == 0x01000000U)
6643+ {
6644+ unsigned int addr, save, call;
6645+
6646+ if ((ba & 0xFFC00000U) == 0x30800000U)
6647+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6648+ else
6649+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6650+
6651+ err = get_user(save, (unsigned int *)addr);
6652+ err |= get_user(call, (unsigned int *)(addr+4));
6653+ err |= get_user(nop, (unsigned int *)(addr+8));
6654+ if (err)
6655+ break;
6656+
6657+#ifdef CONFIG_PAX_DLRESOLVE
6658+ if (save == 0x9DE3BFA8U &&
6659+ (call & 0xC0000000U) == 0x40000000U &&
6660+ nop == 0x01000000U)
6661+ {
6662+ struct vm_area_struct *vma;
6663+ unsigned long call_dl_resolve;
6664+
6665+ down_read(&current->mm->mmap_sem);
6666+ call_dl_resolve = current->mm->call_dl_resolve;
6667+ up_read(&current->mm->mmap_sem);
6668+ if (likely(call_dl_resolve))
6669+ goto emulate;
6670+
6671+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6672+
6673+ down_write(&current->mm->mmap_sem);
6674+ if (current->mm->call_dl_resolve) {
6675+ call_dl_resolve = current->mm->call_dl_resolve;
6676+ up_write(&current->mm->mmap_sem);
6677+ if (vma)
6678+ kmem_cache_free(vm_area_cachep, vma);
6679+ goto emulate;
6680+ }
6681+
6682+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6683+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6684+ up_write(&current->mm->mmap_sem);
6685+ if (vma)
6686+ kmem_cache_free(vm_area_cachep, vma);
6687+ return 1;
6688+ }
6689+
6690+ if (pax_insert_vma(vma, call_dl_resolve)) {
6691+ up_write(&current->mm->mmap_sem);
6692+ kmem_cache_free(vm_area_cachep, vma);
6693+ return 1;
6694+ }
6695+
6696+ current->mm->call_dl_resolve = call_dl_resolve;
6697+ up_write(&current->mm->mmap_sem);
6698+
6699+emulate:
6700+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6701+ regs->pc = call_dl_resolve;
6702+ regs->npc = addr+4;
6703+ return 3;
6704+ }
6705+#endif
6706+
6707+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6708+ if ((save & 0xFFC00000U) == 0x05000000U &&
6709+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6710+ nop == 0x01000000U)
6711+ {
6712+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6713+ regs->u_regs[UREG_G2] = addr + 4;
6714+ addr = (save & 0x003FFFFFU) << 10;
6715+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6716+ regs->pc = addr;
6717+ regs->npc = addr+4;
6718+ return 3;
6719+ }
6720+ }
6721+ } while (0);
6722+
6723+ do { /* PaX: unpatched PLT emulation step 2 */
6724+ unsigned int save, call, nop;
6725+
6726+ err = get_user(save, (unsigned int *)(regs->pc-4));
6727+ err |= get_user(call, (unsigned int *)regs->pc);
6728+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6729+ if (err)
6730+ break;
6731+
6732+ if (save == 0x9DE3BFA8U &&
6733+ (call & 0xC0000000U) == 0x40000000U &&
6734+ nop == 0x01000000U)
6735+ {
6736+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6737+
6738+ regs->u_regs[UREG_RETPC] = regs->pc;
6739+ regs->pc = dl_resolve;
6740+ regs->npc = dl_resolve+4;
6741+ return 3;
6742+ }
6743+ } while (0);
6744+#endif
6745+
6746+ return 1;
6747+}
6748+
6749+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6750+{
6751+ unsigned long i;
6752+
6753+ printk(KERN_ERR "PAX: bytes at PC: ");
6754+ for (i = 0; i < 8; i++) {
6755+ unsigned int c;
6756+ if (get_user(c, (unsigned int *)pc+i))
6757+ printk(KERN_CONT "???????? ");
6758+ else
6759+ printk(KERN_CONT "%08x ", c);
6760+ }
6761+ printk("\n");
6762+}
6763+#endif
6764+
6765 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6766 int text_fault)
6767 {
6768@@ -280,6 +545,24 @@ good_area:
6769 if(!(vma->vm_flags & VM_WRITE))
6770 goto bad_area;
6771 } else {
6772+
6773+#ifdef CONFIG_PAX_PAGEEXEC
6774+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6775+ up_read(&mm->mmap_sem);
6776+ switch (pax_handle_fetch_fault(regs)) {
6777+
6778+#ifdef CONFIG_PAX_EMUPLT
6779+ case 2:
6780+ case 3:
6781+ return;
6782+#endif
6783+
6784+ }
6785+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6786+ do_group_exit(SIGKILL);
6787+ }
6788+#endif
6789+
6790 /* Allow reads even for write-only mappings */
6791 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6792 goto bad_area;
6793diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6794index 504c062..6fcb9c6 100644
6795--- a/arch/sparc/mm/fault_64.c
6796+++ b/arch/sparc/mm/fault_64.c
6797@@ -21,6 +21,9 @@
6798 #include <linux/kprobes.h>
6799 #include <linux/kdebug.h>
6800 #include <linux/percpu.h>
6801+#include <linux/slab.h>
6802+#include <linux/pagemap.h>
6803+#include <linux/compiler.h>
6804
6805 #include <asm/page.h>
6806 #include <asm/pgtable.h>
6807@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6808 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6809 regs->tpc);
6810 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6811- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6812+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6813 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6814 dump_stack();
6815 unhandled_fault(regs->tpc, current, regs);
6816@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6817 show_regs(regs);
6818 }
6819
6820+#ifdef CONFIG_PAX_PAGEEXEC
6821+#ifdef CONFIG_PAX_DLRESOLVE
6822+static void pax_emuplt_close(struct vm_area_struct *vma)
6823+{
6824+ vma->vm_mm->call_dl_resolve = 0UL;
6825+}
6826+
6827+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6828+{
6829+ unsigned int *kaddr;
6830+
6831+ vmf->page = alloc_page(GFP_HIGHUSER);
6832+ if (!vmf->page)
6833+ return VM_FAULT_OOM;
6834+
6835+ kaddr = kmap(vmf->page);
6836+ memset(kaddr, 0, PAGE_SIZE);
6837+ kaddr[0] = 0x9DE3BFA8U; /* save */
6838+ flush_dcache_page(vmf->page);
6839+ kunmap(vmf->page);
6840+ return VM_FAULT_MAJOR;
6841+}
6842+
6843+static const struct vm_operations_struct pax_vm_ops = {
6844+ .close = pax_emuplt_close,
6845+ .fault = pax_emuplt_fault
6846+};
6847+
6848+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6849+{
6850+ int ret;
6851+
6852+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6853+ vma->vm_mm = current->mm;
6854+ vma->vm_start = addr;
6855+ vma->vm_end = addr + PAGE_SIZE;
6856+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6857+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6858+ vma->vm_ops = &pax_vm_ops;
6859+
6860+ ret = insert_vm_struct(current->mm, vma);
6861+ if (ret)
6862+ return ret;
6863+
6864+ ++current->mm->total_vm;
6865+ return 0;
6866+}
6867+#endif
6868+
6869+/*
6870+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6871+ *
6872+ * returns 1 when task should be killed
6873+ * 2 when patched PLT trampoline was detected
6874+ * 3 when unpatched PLT trampoline was detected
6875+ */
6876+static int pax_handle_fetch_fault(struct pt_regs *regs)
6877+{
6878+
6879+#ifdef CONFIG_PAX_EMUPLT
6880+ int err;
6881+
6882+ do { /* PaX: patched PLT emulation #1 */
6883+ unsigned int sethi1, sethi2, jmpl;
6884+
6885+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6886+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6887+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6888+
6889+ if (err)
6890+ break;
6891+
6892+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6893+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6894+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6895+ {
6896+ unsigned long addr;
6897+
6898+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6899+ addr = regs->u_regs[UREG_G1];
6900+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6901+
6902+ if (test_thread_flag(TIF_32BIT))
6903+ addr &= 0xFFFFFFFFUL;
6904+
6905+ regs->tpc = addr;
6906+ regs->tnpc = addr+4;
6907+ return 2;
6908+ }
6909+ } while (0);
6910+
6911+ { /* PaX: patched PLT emulation #2 */
6912+ unsigned int ba;
6913+
6914+ err = get_user(ba, (unsigned int *)regs->tpc);
6915+
6916+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6917+ unsigned long addr;
6918+
6919+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6920+
6921+ if (test_thread_flag(TIF_32BIT))
6922+ addr &= 0xFFFFFFFFUL;
6923+
6924+ regs->tpc = addr;
6925+ regs->tnpc = addr+4;
6926+ return 2;
6927+ }
6928+ }
6929+
6930+ do { /* PaX: patched PLT emulation #3 */
6931+ unsigned int sethi, jmpl, nop;
6932+
6933+ err = get_user(sethi, (unsigned int *)regs->tpc);
6934+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6935+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6936+
6937+ if (err)
6938+ break;
6939+
6940+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6941+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6942+ nop == 0x01000000U)
6943+ {
6944+ unsigned long addr;
6945+
6946+ addr = (sethi & 0x003FFFFFU) << 10;
6947+ regs->u_regs[UREG_G1] = addr;
6948+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6949+
6950+ if (test_thread_flag(TIF_32BIT))
6951+ addr &= 0xFFFFFFFFUL;
6952+
6953+ regs->tpc = addr;
6954+ regs->tnpc = addr+4;
6955+ return 2;
6956+ }
6957+ } while (0);
6958+
6959+ do { /* PaX: patched PLT emulation #4 */
6960+ unsigned int sethi, mov1, call, mov2;
6961+
6962+ err = get_user(sethi, (unsigned int *)regs->tpc);
6963+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6964+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6965+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6966+
6967+ if (err)
6968+ break;
6969+
6970+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6971+ mov1 == 0x8210000FU &&
6972+ (call & 0xC0000000U) == 0x40000000U &&
6973+ mov2 == 0x9E100001U)
6974+ {
6975+ unsigned long addr;
6976+
6977+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6978+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6979+
6980+ if (test_thread_flag(TIF_32BIT))
6981+ addr &= 0xFFFFFFFFUL;
6982+
6983+ regs->tpc = addr;
6984+ regs->tnpc = addr+4;
6985+ return 2;
6986+ }
6987+ } while (0);
6988+
6989+ do { /* PaX: patched PLT emulation #5 */
6990+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6991+
6992+ err = get_user(sethi, (unsigned int *)regs->tpc);
6993+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6994+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6995+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6996+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6997+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6998+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6999+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7000+
7001+ if (err)
7002+ break;
7003+
7004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7005+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7006+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7007+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7008+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7009+ sllx == 0x83287020U &&
7010+ jmpl == 0x81C04005U &&
7011+ nop == 0x01000000U)
7012+ {
7013+ unsigned long addr;
7014+
7015+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7016+ regs->u_regs[UREG_G1] <<= 32;
7017+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7018+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7019+ regs->tpc = addr;
7020+ regs->tnpc = addr+4;
7021+ return 2;
7022+ }
7023+ } while (0);
7024+
7025+ do { /* PaX: patched PLT emulation #6 */
7026+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7027+
7028+ err = get_user(sethi, (unsigned int *)regs->tpc);
7029+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7030+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7031+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7032+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7033+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7034+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7035+
7036+ if (err)
7037+ break;
7038+
7039+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7040+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7041+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7042+ sllx == 0x83287020U &&
7043+ (or & 0xFFFFE000U) == 0x8A116000U &&
7044+ jmpl == 0x81C04005U &&
7045+ nop == 0x01000000U)
7046+ {
7047+ unsigned long addr;
7048+
7049+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7050+ regs->u_regs[UREG_G1] <<= 32;
7051+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7052+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7053+ regs->tpc = addr;
7054+ regs->tnpc = addr+4;
7055+ return 2;
7056+ }
7057+ } while (0);
7058+
7059+ do { /* PaX: unpatched PLT emulation step 1 */
7060+ unsigned int sethi, ba, nop;
7061+
7062+ err = get_user(sethi, (unsigned int *)regs->tpc);
7063+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7064+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7065+
7066+ if (err)
7067+ break;
7068+
7069+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7070+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7071+ nop == 0x01000000U)
7072+ {
7073+ unsigned long addr;
7074+ unsigned int save, call;
7075+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7076+
7077+ if ((ba & 0xFFC00000U) == 0x30800000U)
7078+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7079+ else
7080+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7081+
7082+ if (test_thread_flag(TIF_32BIT))
7083+ addr &= 0xFFFFFFFFUL;
7084+
7085+ err = get_user(save, (unsigned int *)addr);
7086+ err |= get_user(call, (unsigned int *)(addr+4));
7087+ err |= get_user(nop, (unsigned int *)(addr+8));
7088+ if (err)
7089+ break;
7090+
7091+#ifdef CONFIG_PAX_DLRESOLVE
7092+ if (save == 0x9DE3BFA8U &&
7093+ (call & 0xC0000000U) == 0x40000000U &&
7094+ nop == 0x01000000U)
7095+ {
7096+ struct vm_area_struct *vma;
7097+ unsigned long call_dl_resolve;
7098+
7099+ down_read(&current->mm->mmap_sem);
7100+ call_dl_resolve = current->mm->call_dl_resolve;
7101+ up_read(&current->mm->mmap_sem);
7102+ if (likely(call_dl_resolve))
7103+ goto emulate;
7104+
7105+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7106+
7107+ down_write(&current->mm->mmap_sem);
7108+ if (current->mm->call_dl_resolve) {
7109+ call_dl_resolve = current->mm->call_dl_resolve;
7110+ up_write(&current->mm->mmap_sem);
7111+ if (vma)
7112+ kmem_cache_free(vm_area_cachep, vma);
7113+ goto emulate;
7114+ }
7115+
7116+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7117+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7118+ up_write(&current->mm->mmap_sem);
7119+ if (vma)
7120+ kmem_cache_free(vm_area_cachep, vma);
7121+ return 1;
7122+ }
7123+
7124+ if (pax_insert_vma(vma, call_dl_resolve)) {
7125+ up_write(&current->mm->mmap_sem);
7126+ kmem_cache_free(vm_area_cachep, vma);
7127+ return 1;
7128+ }
7129+
7130+ current->mm->call_dl_resolve = call_dl_resolve;
7131+ up_write(&current->mm->mmap_sem);
7132+
7133+emulate:
7134+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7135+ regs->tpc = call_dl_resolve;
7136+ regs->tnpc = addr+4;
7137+ return 3;
7138+ }
7139+#endif
7140+
7141+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7142+ if ((save & 0xFFC00000U) == 0x05000000U &&
7143+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7144+ nop == 0x01000000U)
7145+ {
7146+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7147+ regs->u_regs[UREG_G2] = addr + 4;
7148+ addr = (save & 0x003FFFFFU) << 10;
7149+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7150+
7151+ if (test_thread_flag(TIF_32BIT))
7152+ addr &= 0xFFFFFFFFUL;
7153+
7154+ regs->tpc = addr;
7155+ regs->tnpc = addr+4;
7156+ return 3;
7157+ }
7158+
7159+ /* PaX: 64-bit PLT stub */
7160+ err = get_user(sethi1, (unsigned int *)addr);
7161+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7162+ err |= get_user(or1, (unsigned int *)(addr+8));
7163+ err |= get_user(or2, (unsigned int *)(addr+12));
7164+ err |= get_user(sllx, (unsigned int *)(addr+16));
7165+ err |= get_user(add, (unsigned int *)(addr+20));
7166+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7167+ err |= get_user(nop, (unsigned int *)(addr+28));
7168+ if (err)
7169+ break;
7170+
7171+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7172+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7173+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7174+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7175+ sllx == 0x89293020U &&
7176+ add == 0x8A010005U &&
7177+ jmpl == 0x89C14000U &&
7178+ nop == 0x01000000U)
7179+ {
7180+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7181+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7182+ regs->u_regs[UREG_G4] <<= 32;
7183+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7184+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7185+ regs->u_regs[UREG_G4] = addr + 24;
7186+ addr = regs->u_regs[UREG_G5];
7187+ regs->tpc = addr;
7188+ regs->tnpc = addr+4;
7189+ return 3;
7190+ }
7191+ }
7192+ } while (0);
7193+
7194+#ifdef CONFIG_PAX_DLRESOLVE
7195+ do { /* PaX: unpatched PLT emulation step 2 */
7196+ unsigned int save, call, nop;
7197+
7198+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7199+ err |= get_user(call, (unsigned int *)regs->tpc);
7200+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7201+ if (err)
7202+ break;
7203+
7204+ if (save == 0x9DE3BFA8U &&
7205+ (call & 0xC0000000U) == 0x40000000U &&
7206+ nop == 0x01000000U)
7207+ {
7208+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7209+
7210+ if (test_thread_flag(TIF_32BIT))
7211+ dl_resolve &= 0xFFFFFFFFUL;
7212+
7213+ regs->u_regs[UREG_RETPC] = regs->tpc;
7214+ regs->tpc = dl_resolve;
7215+ regs->tnpc = dl_resolve+4;
7216+ return 3;
7217+ }
7218+ } while (0);
7219+#endif
7220+
7221+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7222+ unsigned int sethi, ba, nop;
7223+
7224+ err = get_user(sethi, (unsigned int *)regs->tpc);
7225+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7226+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7227+
7228+ if (err)
7229+ break;
7230+
7231+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7232+ (ba & 0xFFF00000U) == 0x30600000U &&
7233+ nop == 0x01000000U)
7234+ {
7235+ unsigned long addr;
7236+
7237+ addr = (sethi & 0x003FFFFFU) << 10;
7238+ regs->u_regs[UREG_G1] = addr;
7239+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7240+
7241+ if (test_thread_flag(TIF_32BIT))
7242+ addr &= 0xFFFFFFFFUL;
7243+
7244+ regs->tpc = addr;
7245+ regs->tnpc = addr+4;
7246+ return 2;
7247+ }
7248+ } while (0);
7249+
7250+#endif
7251+
7252+ return 1;
7253+}
7254+
7255+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7256+{
7257+ unsigned long i;
7258+
7259+ printk(KERN_ERR "PAX: bytes at PC: ");
7260+ for (i = 0; i < 8; i++) {
7261+ unsigned int c;
7262+ if (get_user(c, (unsigned int *)pc+i))
7263+ printk(KERN_CONT "???????? ");
7264+ else
7265+ printk(KERN_CONT "%08x ", c);
7266+ }
7267+ printk("\n");
7268+}
7269+#endif
7270+
7271 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7272 {
7273 struct mm_struct *mm = current->mm;
7274@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7275 if (!vma)
7276 goto bad_area;
7277
7278+#ifdef CONFIG_PAX_PAGEEXEC
7279+ /* PaX: detect ITLB misses on non-exec pages */
7280+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7281+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7282+ {
7283+ if (address != regs->tpc)
7284+ goto good_area;
7285+
7286+ up_read(&mm->mmap_sem);
7287+ switch (pax_handle_fetch_fault(regs)) {
7288+
7289+#ifdef CONFIG_PAX_EMUPLT
7290+ case 2:
7291+ case 3:
7292+ return;
7293+#endif
7294+
7295+ }
7296+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7297+ do_group_exit(SIGKILL);
7298+ }
7299+#endif
7300+
7301 /* Pure DTLB misses do not tell us whether the fault causing
7302 * load/store/atomic was a write or not, it only says that there
7303 * was no match. So in such a case we (carefully) read the
7304diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7305index 07e1453..0a7d9e9 100644
7306--- a/arch/sparc/mm/hugetlbpage.c
7307+++ b/arch/sparc/mm/hugetlbpage.c
7308@@ -67,7 +67,7 @@ full_search:
7309 }
7310 return -ENOMEM;
7311 }
7312- if (likely(!vma || addr + len <= vma->vm_start)) {
7313+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7314 /*
7315 * Remember the place where we stopped the search:
7316 */
7317@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7318 /* make sure it can fit in the remaining address space */
7319 if (likely(addr > len)) {
7320 vma = find_vma(mm, addr-len);
7321- if (!vma || addr <= vma->vm_start) {
7322+ if (check_heap_stack_gap(vma, addr - len, len)) {
7323 /* remember the address as a hint for next time */
7324 return (mm->free_area_cache = addr-len);
7325 }
7326@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7327 if (unlikely(mm->mmap_base < len))
7328 goto bottomup;
7329
7330- addr = (mm->mmap_base-len) & HPAGE_MASK;
7331+ addr = mm->mmap_base - len;
7332
7333 do {
7334+ addr &= HPAGE_MASK;
7335 /*
7336 * Lookup failure means no vma is above this address,
7337 * else if new region fits below vma->vm_start,
7338 * return with success:
7339 */
7340 vma = find_vma(mm, addr);
7341- if (likely(!vma || addr+len <= vma->vm_start)) {
7342+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7343 /* remember the address as a hint for next time */
7344 return (mm->free_area_cache = addr);
7345 }
7346@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7347 mm->cached_hole_size = vma->vm_start - addr;
7348
7349 /* try just below the current vma->vm_start */
7350- addr = (vma->vm_start-len) & HPAGE_MASK;
7351- } while (likely(len < vma->vm_start));
7352+ addr = skip_heap_stack_gap(vma, len);
7353+ } while (!IS_ERR_VALUE(addr));
7354
7355 bottomup:
7356 /*
7357@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7358 if (addr) {
7359 addr = ALIGN(addr, HPAGE_SIZE);
7360 vma = find_vma(mm, addr);
7361- if (task_size - len >= addr &&
7362- (!vma || addr + len <= vma->vm_start))
7363+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7364 return addr;
7365 }
7366 if (mm->get_unmapped_area == arch_get_unmapped_area)
7367diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7368index 7b00de6..78239f4 100644
7369--- a/arch/sparc/mm/init_32.c
7370+++ b/arch/sparc/mm/init_32.c
7371@@ -316,6 +316,9 @@ extern void device_scan(void);
7372 pgprot_t PAGE_SHARED __read_mostly;
7373 EXPORT_SYMBOL(PAGE_SHARED);
7374
7375+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7376+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7377+
7378 void __init paging_init(void)
7379 {
7380 switch(sparc_cpu_model) {
7381@@ -344,17 +347,17 @@ void __init paging_init(void)
7382
7383 /* Initialize the protection map with non-constant, MMU dependent values. */
7384 protection_map[0] = PAGE_NONE;
7385- protection_map[1] = PAGE_READONLY;
7386- protection_map[2] = PAGE_COPY;
7387- protection_map[3] = PAGE_COPY;
7388+ protection_map[1] = PAGE_READONLY_NOEXEC;
7389+ protection_map[2] = PAGE_COPY_NOEXEC;
7390+ protection_map[3] = PAGE_COPY_NOEXEC;
7391 protection_map[4] = PAGE_READONLY;
7392 protection_map[5] = PAGE_READONLY;
7393 protection_map[6] = PAGE_COPY;
7394 protection_map[7] = PAGE_COPY;
7395 protection_map[8] = PAGE_NONE;
7396- protection_map[9] = PAGE_READONLY;
7397- protection_map[10] = PAGE_SHARED;
7398- protection_map[11] = PAGE_SHARED;
7399+ protection_map[9] = PAGE_READONLY_NOEXEC;
7400+ protection_map[10] = PAGE_SHARED_NOEXEC;
7401+ protection_map[11] = PAGE_SHARED_NOEXEC;
7402 protection_map[12] = PAGE_READONLY;
7403 protection_map[13] = PAGE_READONLY;
7404 protection_map[14] = PAGE_SHARED;
7405diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7406index cbef74e..c38fead 100644
7407--- a/arch/sparc/mm/srmmu.c
7408+++ b/arch/sparc/mm/srmmu.c
7409@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7410 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7411 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7412 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7413+
7414+#ifdef CONFIG_PAX_PAGEEXEC
7415+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7416+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7417+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7418+#endif
7419+
7420 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7421 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7422
7423diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7424index 27fe667..36d474c 100644
7425--- a/arch/tile/include/asm/atomic_64.h
7426+++ b/arch/tile/include/asm/atomic_64.h
7427@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7428
7429 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7430
7431+#define atomic64_read_unchecked(v) atomic64_read(v)
7432+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7433+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7434+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7435+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7436+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7437+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7438+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7439+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7440+
7441 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7442 #define smp_mb__before_atomic_dec() smp_mb()
7443 #define smp_mb__after_atomic_dec() smp_mb()
7444diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7445index 392e533..536b092 100644
7446--- a/arch/tile/include/asm/cache.h
7447+++ b/arch/tile/include/asm/cache.h
7448@@ -15,11 +15,12 @@
7449 #ifndef _ASM_TILE_CACHE_H
7450 #define _ASM_TILE_CACHE_H
7451
7452+#include <linux/const.h>
7453 #include <arch/chip.h>
7454
7455 /* bytes per L1 data cache line */
7456 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7457-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7458+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7459
7460 /* bytes per L2 cache line */
7461 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7462diff --git a/arch/um/Makefile b/arch/um/Makefile
7463index 28688e6..4c0aa1c 100644
7464--- a/arch/um/Makefile
7465+++ b/arch/um/Makefile
7466@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7467 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7468 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7469
7470+ifdef CONSTIFY_PLUGIN
7471+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7472+endif
7473+
7474 #This will adjust *FLAGS accordingly to the platform.
7475 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7476
7477diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7478index 19e1bdd..3665b77 100644
7479--- a/arch/um/include/asm/cache.h
7480+++ b/arch/um/include/asm/cache.h
7481@@ -1,6 +1,7 @@
7482 #ifndef __UM_CACHE_H
7483 #define __UM_CACHE_H
7484
7485+#include <linux/const.h>
7486
7487 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7488 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7489@@ -12,6 +13,6 @@
7490 # define L1_CACHE_SHIFT 5
7491 #endif
7492
7493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7494+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7495
7496 #endif
7497diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7498index 6c03acd..a5e0215 100644
7499--- a/arch/um/include/asm/kmap_types.h
7500+++ b/arch/um/include/asm/kmap_types.h
7501@@ -23,6 +23,7 @@ enum km_type {
7502 KM_IRQ1,
7503 KM_SOFTIRQ0,
7504 KM_SOFTIRQ1,
7505+ KM_CLEARPAGE,
7506 KM_TYPE_NR
7507 };
7508
7509diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7510index 7cfc3ce..cbd1a58 100644
7511--- a/arch/um/include/asm/page.h
7512+++ b/arch/um/include/asm/page.h
7513@@ -14,6 +14,9 @@
7514 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7515 #define PAGE_MASK (~(PAGE_SIZE-1))
7516
7517+#define ktla_ktva(addr) (addr)
7518+#define ktva_ktla(addr) (addr)
7519+
7520 #ifndef __ASSEMBLY__
7521
7522 struct page;
7523diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7524index 0032f92..cd151e0 100644
7525--- a/arch/um/include/asm/pgtable-3level.h
7526+++ b/arch/um/include/asm/pgtable-3level.h
7527@@ -58,6 +58,7 @@
7528 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7529 #define pud_populate(mm, pud, pmd) \
7530 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7531+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7532
7533 #ifdef CONFIG_64BIT
7534 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7535diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7536index 69f2490..2634831 100644
7537--- a/arch/um/kernel/process.c
7538+++ b/arch/um/kernel/process.c
7539@@ -408,22 +408,6 @@ int singlestepping(void * t)
7540 return 2;
7541 }
7542
7543-/*
7544- * Only x86 and x86_64 have an arch_align_stack().
7545- * All other arches have "#define arch_align_stack(x) (x)"
7546- * in their asm/system.h
7547- * As this is included in UML from asm-um/system-generic.h,
7548- * we can use it to behave as the subarch does.
7549- */
7550-#ifndef arch_align_stack
7551-unsigned long arch_align_stack(unsigned long sp)
7552-{
7553- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7554- sp -= get_random_int() % 8192;
7555- return sp & ~0xf;
7556-}
7557-#endif
7558-
7559 unsigned long get_wchan(struct task_struct *p)
7560 {
7561 unsigned long stack_page, sp, ip;
7562diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7563index ad8f795..2c7eec6 100644
7564--- a/arch/unicore32/include/asm/cache.h
7565+++ b/arch/unicore32/include/asm/cache.h
7566@@ -12,8 +12,10 @@
7567 #ifndef __UNICORE_CACHE_H__
7568 #define __UNICORE_CACHE_H__
7569
7570-#define L1_CACHE_SHIFT (5)
7571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7572+#include <linux/const.h>
7573+
7574+#define L1_CACHE_SHIFT 5
7575+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7576
7577 /*
7578 * Memory returned by kmalloc() may be used for DMA, so we must make
7579diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7580index 5bed94e..fbcf200 100644
7581--- a/arch/x86/Kconfig
7582+++ b/arch/x86/Kconfig
7583@@ -226,7 +226,7 @@ config X86_HT
7584
7585 config X86_32_LAZY_GS
7586 def_bool y
7587- depends on X86_32 && !CC_STACKPROTECTOR
7588+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7589
7590 config ARCH_HWEIGHT_CFLAGS
7591 string
7592@@ -1058,7 +1058,7 @@ choice
7593
7594 config NOHIGHMEM
7595 bool "off"
7596- depends on !X86_NUMAQ
7597+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7598 ---help---
7599 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7600 However, the address space of 32-bit x86 processors is only 4
7601@@ -1095,7 +1095,7 @@ config NOHIGHMEM
7602
7603 config HIGHMEM4G
7604 bool "4GB"
7605- depends on !X86_NUMAQ
7606+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7607 ---help---
7608 Select this if you have a 32-bit processor and between 1 and 4
7609 gigabytes of physical RAM.
7610@@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7611 hex
7612 default 0xB0000000 if VMSPLIT_3G_OPT
7613 default 0x80000000 if VMSPLIT_2G
7614- default 0x78000000 if VMSPLIT_2G_OPT
7615+ default 0x70000000 if VMSPLIT_2G_OPT
7616 default 0x40000000 if VMSPLIT_1G
7617 default 0xC0000000
7618 depends on X86_32
7619@@ -1539,6 +1539,7 @@ config SECCOMP
7620
7621 config CC_STACKPROTECTOR
7622 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7623+ depends on X86_64 || !PAX_MEMORY_UDEREF
7624 ---help---
7625 This option turns on the -fstack-protector GCC feature. This
7626 feature puts, at the beginning of functions, a canary value on
7627@@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7628 config PHYSICAL_START
7629 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7630 default "0x1000000"
7631+ range 0x400000 0x40000000
7632 ---help---
7633 This gives the physical address where the kernel is loaded.
7634
7635@@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7636 config PHYSICAL_ALIGN
7637 hex "Alignment value to which kernel should be aligned" if X86_32
7638 default "0x1000000"
7639+ range 0x400000 0x1000000 if PAX_KERNEXEC
7640 range 0x2000 0x1000000
7641 ---help---
7642 This value puts the alignment restrictions on physical address
7643@@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7644 Say N if you want to disable CPU hotplug.
7645
7646 config COMPAT_VDSO
7647- def_bool y
7648+ def_bool n
7649 prompt "Compat VDSO support"
7650 depends on X86_32 || IA32_EMULATION
7651+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7652 ---help---
7653 Map the 32-bit VDSO to the predictable old-style address too.
7654
7655diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7656index 3c57033..22d44aa 100644
7657--- a/arch/x86/Kconfig.cpu
7658+++ b/arch/x86/Kconfig.cpu
7659@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7660
7661 config X86_F00F_BUG
7662 def_bool y
7663- depends on M586MMX || M586TSC || M586 || M486 || M386
7664+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7665
7666 config X86_INVD_BUG
7667 def_bool y
7668@@ -359,7 +359,7 @@ config X86_POPAD_OK
7669
7670 config X86_ALIGNMENT_16
7671 def_bool y
7672- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7673+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7674
7675 config X86_INTEL_USERCOPY
7676 def_bool y
7677@@ -405,7 +405,7 @@ config X86_CMPXCHG64
7678 # generates cmov.
7679 config X86_CMOV
7680 def_bool y
7681- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7682+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7683
7684 config X86_MINIMUM_CPU_FAMILY
7685 int
7686diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7687index e46c214..7c72b55 100644
7688--- a/arch/x86/Kconfig.debug
7689+++ b/arch/x86/Kconfig.debug
7690@@ -84,7 +84,7 @@ config X86_PTDUMP
7691 config DEBUG_RODATA
7692 bool "Write protect kernel read-only data structures"
7693 default y
7694- depends on DEBUG_KERNEL
7695+ depends on DEBUG_KERNEL && BROKEN
7696 ---help---
7697 Mark the kernel read-only data as write-protected in the pagetables,
7698 in order to catch accidental (and incorrect) writes to such const
7699@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7700
7701 config DEBUG_SET_MODULE_RONX
7702 bool "Set loadable kernel module data as NX and text as RO"
7703- depends on MODULES
7704+ depends on MODULES && BROKEN
7705 ---help---
7706 This option helps catch unintended modifications to loadable
7707 kernel module's text and read-only data. It also prevents execution
7708diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7709index 209ba12..15140db 100644
7710--- a/arch/x86/Makefile
7711+++ b/arch/x86/Makefile
7712@@ -46,6 +46,7 @@ else
7713 UTS_MACHINE := x86_64
7714 CHECKFLAGS += -D__x86_64__ -m64
7715
7716+ biarch := $(call cc-option,-m64)
7717 KBUILD_AFLAGS += -m64
7718 KBUILD_CFLAGS += -m64
7719
7720@@ -201,3 +202,12 @@ define archhelp
7721 echo ' FDARGS="..." arguments for the booted kernel'
7722 echo ' FDINITRD=file initrd for the booted kernel'
7723 endef
7724+
7725+define OLD_LD
7726+
7727+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7728+*** Please upgrade your binutils to 2.18 or newer
7729+endef
7730+
7731+archprepare:
7732+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7733diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7734index 5a747dd..ff7b12c 100644
7735--- a/arch/x86/boot/Makefile
7736+++ b/arch/x86/boot/Makefile
7737@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7738 $(call cc-option, -fno-stack-protector) \
7739 $(call cc-option, -mpreferred-stack-boundary=2)
7740 KBUILD_CFLAGS += $(call cc-option, -m32)
7741+ifdef CONSTIFY_PLUGIN
7742+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7743+endif
7744 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7745 GCOV_PROFILE := n
7746
7747diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7748index 878e4b9..20537ab 100644
7749--- a/arch/x86/boot/bitops.h
7750+++ b/arch/x86/boot/bitops.h
7751@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7752 u8 v;
7753 const u32 *p = (const u32 *)addr;
7754
7755- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7756+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7757 return v;
7758 }
7759
7760@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7761
7762 static inline void set_bit(int nr, void *addr)
7763 {
7764- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7765+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7766 }
7767
7768 #endif /* BOOT_BITOPS_H */
7769diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7770index c7093bd..d4247ffe0 100644
7771--- a/arch/x86/boot/boot.h
7772+++ b/arch/x86/boot/boot.h
7773@@ -85,7 +85,7 @@ static inline u16 ds(void)
7774 static inline u16 ds(void)
7775 {
7776 u16 seg;
7777- asm("movw %%ds,%0" : "=rm" (seg));
7778+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7779 return seg;
7780 }
7781
7782@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7783 static inline int memcmp(const void *s1, const void *s2, size_t len)
7784 {
7785 u8 diff;
7786- asm("repe; cmpsb; setnz %0"
7787+ asm volatile("repe; cmpsb; setnz %0"
7788 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7789 return diff;
7790 }
7791diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7792index fd55a2f..217b501 100644
7793--- a/arch/x86/boot/compressed/Makefile
7794+++ b/arch/x86/boot/compressed/Makefile
7795@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7796 KBUILD_CFLAGS += $(cflags-y)
7797 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7798 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7799+ifdef CONSTIFY_PLUGIN
7800+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7801+endif
7802
7803 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7804 GCOV_PROFILE := n
7805diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7806index c85e3ac..6f5aa80 100644
7807--- a/arch/x86/boot/compressed/head_32.S
7808+++ b/arch/x86/boot/compressed/head_32.S
7809@@ -106,7 +106,7 @@ preferred_addr:
7810 notl %eax
7811 andl %eax, %ebx
7812 #else
7813- movl $LOAD_PHYSICAL_ADDR, %ebx
7814+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7815 #endif
7816
7817 /* Target address to relocate to for decompression */
7818@@ -192,7 +192,7 @@ relocated:
7819 * and where it was actually loaded.
7820 */
7821 movl %ebp, %ebx
7822- subl $LOAD_PHYSICAL_ADDR, %ebx
7823+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7824 jz 2f /* Nothing to be done if loaded at compiled addr. */
7825 /*
7826 * Process relocations.
7827@@ -200,8 +200,7 @@ relocated:
7828
7829 1: subl $4, %edi
7830 movl (%edi), %ecx
7831- testl %ecx, %ecx
7832- jz 2f
7833+ jecxz 2f
7834 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7835 jmp 1b
7836 2:
7837diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7838index 87e03a1..0d94c76 100644
7839--- a/arch/x86/boot/compressed/head_64.S
7840+++ b/arch/x86/boot/compressed/head_64.S
7841@@ -91,7 +91,7 @@ ENTRY(startup_32)
7842 notl %eax
7843 andl %eax, %ebx
7844 #else
7845- movl $LOAD_PHYSICAL_ADDR, %ebx
7846+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7847 #endif
7848
7849 /* Target address to relocate to for decompression */
7850@@ -263,7 +263,7 @@ preferred_addr:
7851 notq %rax
7852 andq %rax, %rbp
7853 #else
7854- movq $LOAD_PHYSICAL_ADDR, %rbp
7855+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7856 #endif
7857
7858 /* Target address to relocate to for decompression */
7859diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7860index 7116dcb..d9ae1d7 100644
7861--- a/arch/x86/boot/compressed/misc.c
7862+++ b/arch/x86/boot/compressed/misc.c
7863@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7864 case PT_LOAD:
7865 #ifdef CONFIG_RELOCATABLE
7866 dest = output;
7867- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7868+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7869 #else
7870 dest = (void *)(phdr->p_paddr);
7871 #endif
7872@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7873 error("Destination address too large");
7874 #endif
7875 #ifndef CONFIG_RELOCATABLE
7876- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7877+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7878 error("Wrong destination address");
7879 #endif
7880
7881diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7882index e77f4e4..17e511f 100644
7883--- a/arch/x86/boot/compressed/relocs.c
7884+++ b/arch/x86/boot/compressed/relocs.c
7885@@ -13,8 +13,11 @@
7886
7887 static void die(char *fmt, ...);
7888
7889+#include "../../../../include/generated/autoconf.h"
7890+
7891 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7892 static Elf32_Ehdr ehdr;
7893+static Elf32_Phdr *phdr;
7894 static unsigned long reloc_count, reloc_idx;
7895 static unsigned long *relocs;
7896
7897@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7898 }
7899 }
7900
7901+static void read_phdrs(FILE *fp)
7902+{
7903+ unsigned int i;
7904+
7905+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7906+ if (!phdr) {
7907+ die("Unable to allocate %d program headers\n",
7908+ ehdr.e_phnum);
7909+ }
7910+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7911+ die("Seek to %d failed: %s\n",
7912+ ehdr.e_phoff, strerror(errno));
7913+ }
7914+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7915+ die("Cannot read ELF program headers: %s\n",
7916+ strerror(errno));
7917+ }
7918+ for(i = 0; i < ehdr.e_phnum; i++) {
7919+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7920+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7921+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7922+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7923+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7924+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7925+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7926+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7927+ }
7928+
7929+}
7930+
7931 static void read_shdrs(FILE *fp)
7932 {
7933- int i;
7934+ unsigned int i;
7935 Elf32_Shdr shdr;
7936
7937 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7938@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7939
7940 static void read_strtabs(FILE *fp)
7941 {
7942- int i;
7943+ unsigned int i;
7944 for (i = 0; i < ehdr.e_shnum; i++) {
7945 struct section *sec = &secs[i];
7946 if (sec->shdr.sh_type != SHT_STRTAB) {
7947@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7948
7949 static void read_symtabs(FILE *fp)
7950 {
7951- int i,j;
7952+ unsigned int i,j;
7953 for (i = 0; i < ehdr.e_shnum; i++) {
7954 struct section *sec = &secs[i];
7955 if (sec->shdr.sh_type != SHT_SYMTAB) {
7956@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7957
7958 static void read_relocs(FILE *fp)
7959 {
7960- int i,j;
7961+ unsigned int i,j;
7962+ uint32_t base;
7963+
7964 for (i = 0; i < ehdr.e_shnum; i++) {
7965 struct section *sec = &secs[i];
7966 if (sec->shdr.sh_type != SHT_REL) {
7967@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7968 die("Cannot read symbol table: %s\n",
7969 strerror(errno));
7970 }
7971+ base = 0;
7972+ for (j = 0; j < ehdr.e_phnum; j++) {
7973+ if (phdr[j].p_type != PT_LOAD )
7974+ continue;
7975+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7976+ continue;
7977+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7978+ break;
7979+ }
7980 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7981 Elf32_Rel *rel = &sec->reltab[j];
7982- rel->r_offset = elf32_to_cpu(rel->r_offset);
7983+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7984 rel->r_info = elf32_to_cpu(rel->r_info);
7985 }
7986 }
7987@@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
7988
7989 static void print_absolute_symbols(void)
7990 {
7991- int i;
7992+ unsigned int i;
7993 printf("Absolute symbols\n");
7994 printf(" Num: Value Size Type Bind Visibility Name\n");
7995 for (i = 0; i < ehdr.e_shnum; i++) {
7996 struct section *sec = &secs[i];
7997 char *sym_strtab;
7998- int j;
7999+ unsigned int j;
8000
8001 if (sec->shdr.sh_type != SHT_SYMTAB) {
8002 continue;
8003@@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
8004
8005 static void print_absolute_relocs(void)
8006 {
8007- int i, printed = 0;
8008+ unsigned int i, printed = 0;
8009
8010 for (i = 0; i < ehdr.e_shnum; i++) {
8011 struct section *sec = &secs[i];
8012 struct section *sec_applies, *sec_symtab;
8013 char *sym_strtab;
8014 Elf32_Sym *sh_symtab;
8015- int j;
8016+ unsigned int j;
8017 if (sec->shdr.sh_type != SHT_REL) {
8018 continue;
8019 }
8020@@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
8021
8022 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8023 {
8024- int i;
8025+ unsigned int i;
8026 /* Walk through the relocations */
8027 for (i = 0; i < ehdr.e_shnum; i++) {
8028 char *sym_strtab;
8029 Elf32_Sym *sh_symtab;
8030 struct section *sec_applies, *sec_symtab;
8031- int j;
8032+ unsigned int j;
8033 struct section *sec = &secs[i];
8034
8035 if (sec->shdr.sh_type != SHT_REL) {
8036@@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8037 !is_rel_reloc(sym_name(sym_strtab, sym))) {
8038 continue;
8039 }
8040+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8041+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8042+ continue;
8043+
8044+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8045+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8046+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8047+ continue;
8048+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8049+ continue;
8050+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8051+ continue;
8052+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8053+ continue;
8054+#endif
8055+
8056 switch (r_type) {
8057 case R_386_NONE:
8058 case R_386_PC32:
8059@@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
8060
8061 static void emit_relocs(int as_text)
8062 {
8063- int i;
8064+ unsigned int i;
8065 /* Count how many relocations I have and allocate space for them. */
8066 reloc_count = 0;
8067 walk_relocs(count_reloc);
8068@@ -663,6 +723,7 @@ int main(int argc, char **argv)
8069 fname, strerror(errno));
8070 }
8071 read_ehdr(fp);
8072+ read_phdrs(fp);
8073 read_shdrs(fp);
8074 read_strtabs(fp);
8075 read_symtabs(fp);
8076diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8077index 4d3ff03..e4972ff 100644
8078--- a/arch/x86/boot/cpucheck.c
8079+++ b/arch/x86/boot/cpucheck.c
8080@@ -74,7 +74,7 @@ static int has_fpu(void)
8081 u16 fcw = -1, fsw = -1;
8082 u32 cr0;
8083
8084- asm("movl %%cr0,%0" : "=r" (cr0));
8085+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8086 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8087 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8088 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8089@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8090 {
8091 u32 f0, f1;
8092
8093- asm("pushfl ; "
8094+ asm volatile("pushfl ; "
8095 "pushfl ; "
8096 "popl %0 ; "
8097 "movl %0,%1 ; "
8098@@ -115,7 +115,7 @@ static void get_flags(void)
8099 set_bit(X86_FEATURE_FPU, cpu.flags);
8100
8101 if (has_eflag(X86_EFLAGS_ID)) {
8102- asm("cpuid"
8103+ asm volatile("cpuid"
8104 : "=a" (max_intel_level),
8105 "=b" (cpu_vendor[0]),
8106 "=d" (cpu_vendor[1]),
8107@@ -124,7 +124,7 @@ static void get_flags(void)
8108
8109 if (max_intel_level >= 0x00000001 &&
8110 max_intel_level <= 0x0000ffff) {
8111- asm("cpuid"
8112+ asm volatile("cpuid"
8113 : "=a" (tfms),
8114 "=c" (cpu.flags[4]),
8115 "=d" (cpu.flags[0])
8116@@ -136,7 +136,7 @@ static void get_flags(void)
8117 cpu.model += ((tfms >> 16) & 0xf) << 4;
8118 }
8119
8120- asm("cpuid"
8121+ asm volatile("cpuid"
8122 : "=a" (max_amd_level)
8123 : "a" (0x80000000)
8124 : "ebx", "ecx", "edx");
8125@@ -144,7 +144,7 @@ static void get_flags(void)
8126 if (max_amd_level >= 0x80000001 &&
8127 max_amd_level <= 0x8000ffff) {
8128 u32 eax = 0x80000001;
8129- asm("cpuid"
8130+ asm volatile("cpuid"
8131 : "+a" (eax),
8132 "=c" (cpu.flags[6]),
8133 "=d" (cpu.flags[1])
8134@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8135 u32 ecx = MSR_K7_HWCR;
8136 u32 eax, edx;
8137
8138- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8139+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8140 eax &= ~(1 << 15);
8141- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8142+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8143
8144 get_flags(); /* Make sure it really did something */
8145 err = check_flags();
8146@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8147 u32 ecx = MSR_VIA_FCR;
8148 u32 eax, edx;
8149
8150- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8151+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8152 eax |= (1<<1)|(1<<7);
8153- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8154+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8155
8156 set_bit(X86_FEATURE_CX8, cpu.flags);
8157 err = check_flags();
8158@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8159 u32 eax, edx;
8160 u32 level = 1;
8161
8162- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8163- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8164- asm("cpuid"
8165+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8166+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8167+ asm volatile("cpuid"
8168 : "+a" (level), "=d" (cpu.flags[0])
8169 : : "ecx", "ebx");
8170- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8171+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8172
8173 err = check_flags();
8174 }
8175diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8176index f1bbeeb..aff09cb 100644
8177--- a/arch/x86/boot/header.S
8178+++ b/arch/x86/boot/header.S
8179@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8180 # single linked list of
8181 # struct setup_data
8182
8183-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8184+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8185
8186 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8187 #define VO_INIT_SIZE (VO__end - VO__text)
8188diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8189index db75d07..8e6d0af 100644
8190--- a/arch/x86/boot/memory.c
8191+++ b/arch/x86/boot/memory.c
8192@@ -19,7 +19,7 @@
8193
8194 static int detect_memory_e820(void)
8195 {
8196- int count = 0;
8197+ unsigned int count = 0;
8198 struct biosregs ireg, oreg;
8199 struct e820entry *desc = boot_params.e820_map;
8200 static struct e820entry buf; /* static so it is zeroed */
8201diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8202index 11e8c6e..fdbb1ed 100644
8203--- a/arch/x86/boot/video-vesa.c
8204+++ b/arch/x86/boot/video-vesa.c
8205@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8206
8207 boot_params.screen_info.vesapm_seg = oreg.es;
8208 boot_params.screen_info.vesapm_off = oreg.di;
8209+ boot_params.screen_info.vesapm_size = oreg.cx;
8210 }
8211
8212 /*
8213diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8214index 43eda28..5ab5fdb 100644
8215--- a/arch/x86/boot/video.c
8216+++ b/arch/x86/boot/video.c
8217@@ -96,7 +96,7 @@ static void store_mode_params(void)
8218 static unsigned int get_entry(void)
8219 {
8220 char entry_buf[4];
8221- int i, len = 0;
8222+ unsigned int i, len = 0;
8223 int key;
8224 unsigned int v;
8225
8226diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8227index 5b577d5..3c1fed4 100644
8228--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8229+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8230@@ -8,6 +8,8 @@
8231 * including this sentence is retained in full.
8232 */
8233
8234+#include <asm/alternative-asm.h>
8235+
8236 .extern crypto_ft_tab
8237 .extern crypto_it_tab
8238 .extern crypto_fl_tab
8239@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8240 je B192; \
8241 leaq 32(r9),r9;
8242
8243+#define ret pax_force_retaddr 0, 1; ret
8244+
8245 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8246 movq r1,r2; \
8247 movq r3,r4; \
8248diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8249index be6d9e3..21fbbca 100644
8250--- a/arch/x86/crypto/aesni-intel_asm.S
8251+++ b/arch/x86/crypto/aesni-intel_asm.S
8252@@ -31,6 +31,7 @@
8253
8254 #include <linux/linkage.h>
8255 #include <asm/inst.h>
8256+#include <asm/alternative-asm.h>
8257
8258 #ifdef __x86_64__
8259 .data
8260@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8261 pop %r14
8262 pop %r13
8263 pop %r12
8264+ pax_force_retaddr 0, 1
8265 ret
8266+ENDPROC(aesni_gcm_dec)
8267
8268
8269 /*****************************************************************************
8270@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8271 pop %r14
8272 pop %r13
8273 pop %r12
8274+ pax_force_retaddr 0, 1
8275 ret
8276+ENDPROC(aesni_gcm_enc)
8277
8278 #endif
8279
8280@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8281 pxor %xmm1, %xmm0
8282 movaps %xmm0, (TKEYP)
8283 add $0x10, TKEYP
8284+ pax_force_retaddr_bts
8285 ret
8286
8287 .align 4
8288@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8289 shufps $0b01001110, %xmm2, %xmm1
8290 movaps %xmm1, 0x10(TKEYP)
8291 add $0x20, TKEYP
8292+ pax_force_retaddr_bts
8293 ret
8294
8295 .align 4
8296@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8297
8298 movaps %xmm0, (TKEYP)
8299 add $0x10, TKEYP
8300+ pax_force_retaddr_bts
8301 ret
8302
8303 .align 4
8304@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8305 pxor %xmm1, %xmm2
8306 movaps %xmm2, (TKEYP)
8307 add $0x10, TKEYP
8308+ pax_force_retaddr_bts
8309 ret
8310
8311 /*
8312@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8313 #ifndef __x86_64__
8314 popl KEYP
8315 #endif
8316+ pax_force_retaddr 0, 1
8317 ret
8318+ENDPROC(aesni_set_key)
8319
8320 /*
8321 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8322@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8323 popl KLEN
8324 popl KEYP
8325 #endif
8326+ pax_force_retaddr 0, 1
8327 ret
8328+ENDPROC(aesni_enc)
8329
8330 /*
8331 * _aesni_enc1: internal ABI
8332@@ -1959,6 +1972,7 @@ _aesni_enc1:
8333 AESENC KEY STATE
8334 movaps 0x70(TKEYP), KEY
8335 AESENCLAST KEY STATE
8336+ pax_force_retaddr_bts
8337 ret
8338
8339 /*
8340@@ -2067,6 +2081,7 @@ _aesni_enc4:
8341 AESENCLAST KEY STATE2
8342 AESENCLAST KEY STATE3
8343 AESENCLAST KEY STATE4
8344+ pax_force_retaddr_bts
8345 ret
8346
8347 /*
8348@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8349 popl KLEN
8350 popl KEYP
8351 #endif
8352+ pax_force_retaddr 0, 1
8353 ret
8354+ENDPROC(aesni_dec)
8355
8356 /*
8357 * _aesni_dec1: internal ABI
8358@@ -2146,6 +2163,7 @@ _aesni_dec1:
8359 AESDEC KEY STATE
8360 movaps 0x70(TKEYP), KEY
8361 AESDECLAST KEY STATE
8362+ pax_force_retaddr_bts
8363 ret
8364
8365 /*
8366@@ -2254,6 +2272,7 @@ _aesni_dec4:
8367 AESDECLAST KEY STATE2
8368 AESDECLAST KEY STATE3
8369 AESDECLAST KEY STATE4
8370+ pax_force_retaddr_bts
8371 ret
8372
8373 /*
8374@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8375 popl KEYP
8376 popl LEN
8377 #endif
8378+ pax_force_retaddr 0, 1
8379 ret
8380+ENDPROC(aesni_ecb_enc)
8381
8382 /*
8383 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8384@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8385 popl KEYP
8386 popl LEN
8387 #endif
8388+ pax_force_retaddr 0, 1
8389 ret
8390+ENDPROC(aesni_ecb_dec)
8391
8392 /*
8393 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8394@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8395 popl LEN
8396 popl IVP
8397 #endif
8398+ pax_force_retaddr 0, 1
8399 ret
8400+ENDPROC(aesni_cbc_enc)
8401
8402 /*
8403 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8404@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8405 popl LEN
8406 popl IVP
8407 #endif
8408+ pax_force_retaddr 0, 1
8409 ret
8410+ENDPROC(aesni_cbc_dec)
8411
8412 #ifdef __x86_64__
8413 .align 16
8414@@ -2524,6 +2551,7 @@ _aesni_inc_init:
8415 mov $1, TCTR_LOW
8416 MOVQ_R64_XMM TCTR_LOW INC
8417 MOVQ_R64_XMM CTR TCTR_LOW
8418+ pax_force_retaddr_bts
8419 ret
8420
8421 /*
8422@@ -2552,6 +2580,7 @@ _aesni_inc:
8423 .Linc_low:
8424 movaps CTR, IV
8425 PSHUFB_XMM BSWAP_MASK IV
8426+ pax_force_retaddr_bts
8427 ret
8428
8429 /*
8430@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8431 .Lctr_enc_ret:
8432 movups IV, (IVP)
8433 .Lctr_enc_just_ret:
8434+ pax_force_retaddr 0, 1
8435 ret
8436+ENDPROC(aesni_ctr_enc)
8437 #endif
8438diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
8439index 545d0ce..14841a6 100644
8440--- a/arch/x86/crypto/aesni-intel_glue.c
8441+++ b/arch/x86/crypto/aesni-intel_glue.c
8442@@ -929,6 +929,8 @@ out_free_ablkcipher:
8443 }
8444
8445 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8446+ unsigned int key_len) __size_overflow(3);
8447+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8448 unsigned int key_len)
8449 {
8450 int ret = 0;
8451diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8452index 391d245..67f35c2 100644
8453--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8454+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8455@@ -20,6 +20,8 @@
8456 *
8457 */
8458
8459+#include <asm/alternative-asm.h>
8460+
8461 .file "blowfish-x86_64-asm.S"
8462 .text
8463
8464@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8465 jnz __enc_xor;
8466
8467 write_block();
8468+ pax_force_retaddr 0, 1
8469 ret;
8470 __enc_xor:
8471 xor_block();
8472+ pax_force_retaddr 0, 1
8473 ret;
8474
8475 .align 8
8476@@ -188,6 +192,7 @@ blowfish_dec_blk:
8477
8478 movq %r11, %rbp;
8479
8480+ pax_force_retaddr 0, 1
8481 ret;
8482
8483 /**********************************************************************
8484@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8485
8486 popq %rbx;
8487 popq %rbp;
8488+ pax_force_retaddr 0, 1
8489 ret;
8490
8491 __enc_xor4:
8492@@ -349,6 +355,7 @@ __enc_xor4:
8493
8494 popq %rbx;
8495 popq %rbp;
8496+ pax_force_retaddr 0, 1
8497 ret;
8498
8499 .align 8
8500@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8501 popq %rbx;
8502 popq %rbp;
8503
8504+ pax_force_retaddr 0, 1
8505 ret;
8506
8507diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8508index 6214a9b..1f4fc9a 100644
8509--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8510+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8511@@ -1,3 +1,5 @@
8512+#include <asm/alternative-asm.h>
8513+
8514 # enter ECRYPT_encrypt_bytes
8515 .text
8516 .p2align 5
8517@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8518 add %r11,%rsp
8519 mov %rdi,%rax
8520 mov %rsi,%rdx
8521+ pax_force_retaddr 0, 1
8522 ret
8523 # bytesatleast65:
8524 ._bytesatleast65:
8525@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8526 add %r11,%rsp
8527 mov %rdi,%rax
8528 mov %rsi,%rdx
8529+ pax_force_retaddr
8530 ret
8531 # enter ECRYPT_ivsetup
8532 .text
8533@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8534 add %r11,%rsp
8535 mov %rdi,%rax
8536 mov %rsi,%rdx
8537+ pax_force_retaddr
8538 ret
8539diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8540index 7f24a15..9cd3ffe 100644
8541--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8542+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8543@@ -24,6 +24,8 @@
8544 *
8545 */
8546
8547+#include <asm/alternative-asm.h>
8548+
8549 .file "serpent-sse2-x86_64-asm_64.S"
8550 .text
8551
8552@@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8553 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8554 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8555
8556+ pax_force_retaddr
8557 ret;
8558
8559 __enc_xor8:
8560 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8561 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8562
8563+ pax_force_retaddr
8564 ret;
8565
8566 .align 8
8567@@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8568 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8569 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8570
8571+ pax_force_retaddr
8572 ret;
8573diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8574index b2c2f57..8470cab 100644
8575--- a/arch/x86/crypto/sha1_ssse3_asm.S
8576+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8577@@ -28,6 +28,8 @@
8578 * (at your option) any later version.
8579 */
8580
8581+#include <asm/alternative-asm.h>
8582+
8583 #define CTX %rdi // arg1
8584 #define BUF %rsi // arg2
8585 #define CNT %rdx // arg3
8586@@ -104,6 +106,7 @@
8587 pop %r12
8588 pop %rbp
8589 pop %rbx
8590+ pax_force_retaddr 0, 1
8591 ret
8592
8593 .size \name, .-\name
8594diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8595index 5b012a2..36d5364 100644
8596--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8597+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8598@@ -20,6 +20,8 @@
8599 *
8600 */
8601
8602+#include <asm/alternative-asm.h>
8603+
8604 .file "twofish-x86_64-asm-3way.S"
8605 .text
8606
8607@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8608 popq %r13;
8609 popq %r14;
8610 popq %r15;
8611+ pax_force_retaddr 0, 1
8612 ret;
8613
8614 __enc_xor3:
8615@@ -271,6 +274,7 @@ __enc_xor3:
8616 popq %r13;
8617 popq %r14;
8618 popq %r15;
8619+ pax_force_retaddr 0, 1
8620 ret;
8621
8622 .global twofish_dec_blk_3way
8623@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8624 popq %r13;
8625 popq %r14;
8626 popq %r15;
8627+ pax_force_retaddr 0, 1
8628 ret;
8629
8630diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8631index 7bcf3fc..f53832f 100644
8632--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8633+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8634@@ -21,6 +21,7 @@
8635 .text
8636
8637 #include <asm/asm-offsets.h>
8638+#include <asm/alternative-asm.h>
8639
8640 #define a_offset 0
8641 #define b_offset 4
8642@@ -268,6 +269,7 @@ twofish_enc_blk:
8643
8644 popq R1
8645 movq $1,%rax
8646+ pax_force_retaddr 0, 1
8647 ret
8648
8649 twofish_dec_blk:
8650@@ -319,4 +321,5 @@ twofish_dec_blk:
8651
8652 popq R1
8653 movq $1,%rax
8654+ pax_force_retaddr 0, 1
8655 ret
8656diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8657index 39e4909..887aa7e 100644
8658--- a/arch/x86/ia32/ia32_aout.c
8659+++ b/arch/x86/ia32/ia32_aout.c
8660@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8661 unsigned long dump_start, dump_size;
8662 struct user32 dump;
8663
8664+ memset(&dump, 0, sizeof(dump));
8665+
8666 fs = get_fs();
8667 set_fs(KERNEL_DS);
8668 has_dumped = 1;
8669diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8670index 6557769..ef6ae89 100644
8671--- a/arch/x86/ia32/ia32_signal.c
8672+++ b/arch/x86/ia32/ia32_signal.c
8673@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8674 }
8675 seg = get_fs();
8676 set_fs(KERNEL_DS);
8677- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8678+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8679 set_fs(seg);
8680 if (ret >= 0 && uoss_ptr) {
8681 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8682@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8683 */
8684 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8685 size_t frame_size,
8686- void **fpstate)
8687+ void __user **fpstate)
8688 {
8689 unsigned long sp;
8690
8691@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8692
8693 if (used_math()) {
8694 sp = sp - sig_xstate_ia32_size;
8695- *fpstate = (struct _fpstate_ia32 *) sp;
8696+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8697 if (save_i387_xstate_ia32(*fpstate) < 0)
8698 return (void __user *) -1L;
8699 }
8700@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8701 sp -= frame_size;
8702 /* Align the stack pointer according to the i386 ABI,
8703 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8704- sp = ((sp + 4) & -16ul) - 4;
8705+ sp = ((sp - 12) & -16ul) - 4;
8706 return (void __user *) sp;
8707 }
8708
8709@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8710 * These are actually not used anymore, but left because some
8711 * gdb versions depend on them as a marker.
8712 */
8713- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8714+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8715 } put_user_catch(err);
8716
8717 if (err)
8718@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8719 0xb8,
8720 __NR_ia32_rt_sigreturn,
8721 0x80cd,
8722- 0,
8723+ 0
8724 };
8725
8726 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8727@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8728
8729 if (ka->sa.sa_flags & SA_RESTORER)
8730 restorer = ka->sa.sa_restorer;
8731+ else if (current->mm->context.vdso)
8732+ /* Return stub is in 32bit vsyscall page */
8733+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8734 else
8735- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8736- rt_sigreturn);
8737+ restorer = &frame->retcode;
8738 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8739
8740 /*
8741 * Not actually used anymore, but left because some gdb
8742 * versions need it.
8743 */
8744- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8745+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8746 } put_user_catch(err);
8747
8748 if (err)
8749diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8750index e3e7340..05ed805 100644
8751--- a/arch/x86/ia32/ia32entry.S
8752+++ b/arch/x86/ia32/ia32entry.S
8753@@ -13,8 +13,10 @@
8754 #include <asm/thread_info.h>
8755 #include <asm/segment.h>
8756 #include <asm/irqflags.h>
8757+#include <asm/pgtable.h>
8758 #include <linux/linkage.h>
8759 #include <linux/err.h>
8760+#include <asm/alternative-asm.h>
8761
8762 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8763 #include <linux/elf-em.h>
8764@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8765 ENDPROC(native_irq_enable_sysexit)
8766 #endif
8767
8768+ .macro pax_enter_kernel_user
8769+ pax_set_fptr_mask
8770+#ifdef CONFIG_PAX_MEMORY_UDEREF
8771+ call pax_enter_kernel_user
8772+#endif
8773+ .endm
8774+
8775+ .macro pax_exit_kernel_user
8776+#ifdef CONFIG_PAX_MEMORY_UDEREF
8777+ call pax_exit_kernel_user
8778+#endif
8779+#ifdef CONFIG_PAX_RANDKSTACK
8780+ pushq %rax
8781+ pushq %r11
8782+ call pax_randomize_kstack
8783+ popq %r11
8784+ popq %rax
8785+#endif
8786+ .endm
8787+
8788+.macro pax_erase_kstack
8789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8790+ call pax_erase_kstack
8791+#endif
8792+.endm
8793+
8794 /*
8795 * 32bit SYSENTER instruction entry.
8796 *
8797@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8798 CFI_REGISTER rsp,rbp
8799 SWAPGS_UNSAFE_STACK
8800 movq PER_CPU_VAR(kernel_stack), %rsp
8801- addq $(KERNEL_STACK_OFFSET),%rsp
8802- /*
8803- * No need to follow this irqs on/off section: the syscall
8804- * disabled irqs, here we enable it straight after entry:
8805- */
8806- ENABLE_INTERRUPTS(CLBR_NONE)
8807 movl %ebp,%ebp /* zero extension */
8808 pushq_cfi $__USER32_DS
8809 /*CFI_REL_OFFSET ss,0*/
8810@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8811 CFI_REL_OFFSET rsp,0
8812 pushfq_cfi
8813 /*CFI_REL_OFFSET rflags,0*/
8814- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8815- CFI_REGISTER rip,r10
8816+ orl $X86_EFLAGS_IF,(%rsp)
8817+ GET_THREAD_INFO(%r11)
8818+ movl TI_sysenter_return(%r11), %r11d
8819+ CFI_REGISTER rip,r11
8820 pushq_cfi $__USER32_CS
8821 /*CFI_REL_OFFSET cs,0*/
8822 movl %eax, %eax
8823- pushq_cfi %r10
8824+ pushq_cfi %r11
8825 CFI_REL_OFFSET rip,0
8826 pushq_cfi %rax
8827 cld
8828 SAVE_ARGS 0,1,0
8829+ pax_enter_kernel_user
8830+ /*
8831+ * No need to follow this irqs on/off section: the syscall
8832+ * disabled irqs, here we enable it straight after entry:
8833+ */
8834+ ENABLE_INTERRUPTS(CLBR_NONE)
8835 /* no need to do an access_ok check here because rbp has been
8836 32bit zero extended */
8837+
8838+#ifdef CONFIG_PAX_MEMORY_UDEREF
8839+ mov $PAX_USER_SHADOW_BASE,%r11
8840+ add %r11,%rbp
8841+#endif
8842+
8843 1: movl (%rbp),%ebp
8844 .section __ex_table,"a"
8845 .quad 1b,ia32_badarg
8846 .previous
5e856224
MT
8847- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8848- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8849+ GET_THREAD_INFO(%r11)
8850+ orl $TS_COMPAT,TI_status(%r11)
8851+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8852 CFI_REMEMBER_STATE
8853 jnz sysenter_tracesys
8854 cmpq $(IA32_NR_syscalls-1),%rax
5e856224 8855@@ -160,12 +197,15 @@ sysenter_do_call:
fe2de317
MT
8856 sysenter_dispatch:
8857 call *ia32_sys_call_table(,%rax,8)
8858 movq %rax,RAX-ARGOFFSET(%rsp)
fe2de317
MT
8859+ GET_THREAD_INFO(%r11)
8860 DISABLE_INTERRUPTS(CLBR_NONE)
8861 TRACE_IRQS_OFF
5e856224 8862- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8863+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8864 jnz sysexit_audit
8865 sysexit_from_sys_call:
5e856224 8866- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8867+ pax_exit_kernel_user
8868+ pax_erase_kstack
8869+ andl $~TS_COMPAT,TI_status(%r11)
8870 /* clear IF, that popfq doesn't enable interrupts early */
8871 andl $~0x200,EFLAGS-R11(%rsp)
8872 movl RIP-R11(%rsp),%edx /* User %eip */
5e856224 8873@@ -191,6 +231,9 @@ sysexit_from_sys_call:
fe2de317
MT
8874 movl %eax,%esi /* 2nd arg: syscall number */
8875 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5e856224 8876 call __audit_syscall_entry
fe2de317
MT
8877+
8878+ pax_erase_kstack
8879+
8880 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8881 cmpq $(IA32_NR_syscalls-1),%rax
8882 ja ia32_badsys
5e856224 8883@@ -202,7 +245,7 @@ sysexit_from_sys_call:
fe2de317
MT
8884 .endm
8885
8886 .macro auditsys_exit exit
5e856224 8887- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8888+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8889 jnz ia32_ret_from_sys_call
8890 TRACE_IRQS_ON
8891 sti
5e856224
MT
8892@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8893 1: setbe %al /* 1 if error, 0 if not */
fe2de317 8894 movzbl %al,%edi /* zero-extend that into %edi */
5e856224 8895 call __audit_syscall_exit
fe2de317 8896+ GET_THREAD_INFO(%r11)
5e856224 8897 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
fe2de317
MT
8898 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8899 cli
8900 TRACE_IRQS_OFF
5e856224 8901- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8902+ testl %edi,TI_flags(%r11)
8903 jz \exit
8904 CLEAR_RREGS -ARGOFFSET
8905 jmp int_with_check
5e856224 8906@@ -235,7 +279,7 @@ sysexit_audit:
fe2de317
MT
8907
8908 sysenter_tracesys:
8909 #ifdef CONFIG_AUDITSYSCALL
5e856224 8910- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
fe2de317
MT
8911+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8912 jz sysenter_auditsys
8913 #endif
8914 SAVE_REST
5e856224 8915@@ -243,6 +287,9 @@ sysenter_tracesys:
fe2de317
MT
8916 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8917 movq %rsp,%rdi /* &pt_regs -> arg1 */
8918 call syscall_trace_enter
8919+
8920+ pax_erase_kstack
8921+
8922 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8923 RESTORE_REST
8924 cmpq $(IA32_NR_syscalls-1),%rax
5e856224 8925@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
fe2de317
MT
8926 ENTRY(ia32_cstar_target)
8927 CFI_STARTPROC32 simple
8928 CFI_SIGNAL_FRAME
8929- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8930+ CFI_DEF_CFA rsp,0
8931 CFI_REGISTER rip,rcx
8932 /*CFI_REGISTER rflags,r11*/
8933 SWAPGS_UNSAFE_STACK
8934 movl %esp,%r8d
8935 CFI_REGISTER rsp,r8
8936 movq PER_CPU_VAR(kernel_stack),%rsp
8937+ SAVE_ARGS 8*6,0,0
8938+ pax_enter_kernel_user
8939 /*
8940 * No need to follow this irqs on/off section: the syscall
8941 * disabled irqs and here we enable it straight after entry:
8942 */
8943 ENABLE_INTERRUPTS(CLBR_NONE)
8944- SAVE_ARGS 8,0,0
8945 movl %eax,%eax /* zero extension */
8946 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8947 movq %rcx,RIP-ARGOFFSET(%rsp)
8948@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8949 /* no need to do an access_ok check here because r8 has been
8950 32bit zero extended */
8951 /* hardware stack frame is complete now */
8952+
8953+#ifdef CONFIG_PAX_MEMORY_UDEREF
8954+ mov $PAX_USER_SHADOW_BASE,%r11
8955+ add %r11,%r8
8956+#endif
8957+
8958 1: movl (%r8),%r9d
8959 .section __ex_table,"a"
8960 .quad 1b,ia32_badarg
8961 .previous
8962- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8963- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8964+ GET_THREAD_INFO(%r11)
8965+ orl $TS_COMPAT,TI_status(%r11)
8966+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8967 CFI_REMEMBER_STATE
8968 jnz cstar_tracesys
8969 cmpq $IA32_NR_syscalls-1,%rax
8970@@ -317,12 +372,15 @@ cstar_do_call:
8971 cstar_dispatch:
8972 call *ia32_sys_call_table(,%rax,8)
8973 movq %rax,RAX-ARGOFFSET(%rsp)
8974+ GET_THREAD_INFO(%r11)
8975 DISABLE_INTERRUPTS(CLBR_NONE)
8976 TRACE_IRQS_OFF
8977- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8978+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8979 jnz sysretl_audit
8980 sysretl_from_sys_call:
8981- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8982+ pax_exit_kernel_user
8983+ pax_erase_kstack
8984+ andl $~TS_COMPAT,TI_status(%r11)
8985 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8986 movl RIP-ARGOFFSET(%rsp),%ecx
8987 CFI_REGISTER rip,rcx
8988@@ -350,7 +408,7 @@ sysretl_audit:
8989
8990 cstar_tracesys:
8991 #ifdef CONFIG_AUDITSYSCALL
8992- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8993+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8994 jz cstar_auditsys
8995 #endif
8996 xchgl %r9d,%ebp
8997@@ -359,6 +417,9 @@ cstar_tracesys:
8998 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8999 movq %rsp,%rdi /* &pt_regs -> arg1 */
9000 call syscall_trace_enter
9001+
9002+ pax_erase_kstack
9003+
9004 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9005 RESTORE_REST
9006 xchgl %ebp,%r9d
9007@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
9008 CFI_REL_OFFSET rip,RIP-RIP
9009 PARAVIRT_ADJUST_EXCEPTION_FRAME
9010 SWAPGS
9011- /*
9012- * No need to follow this irqs on/off section: the syscall
9013- * disabled irqs and here we enable it straight after entry:
9014- */
9015- ENABLE_INTERRUPTS(CLBR_NONE)
9016 movl %eax,%eax
9017 pushq_cfi %rax
9018 cld
9019 /* note the registers are not zero extended to the sf.
9020 this could be a problem. */
9021 SAVE_ARGS 0,1,0
9022- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9023- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9024+ pax_enter_kernel_user
9025+ /*
9026+ * No need to follow this irqs on/off section: the syscall
9027+ * disabled irqs and here we enable it straight after entry:
9028+ */
9029+ ENABLE_INTERRUPTS(CLBR_NONE)
9030+ GET_THREAD_INFO(%r11)
9031+ orl $TS_COMPAT,TI_status(%r11)
9032+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9033 jnz ia32_tracesys
9034 cmpq $(IA32_NR_syscalls-1),%rax
9035 ja ia32_badsys
9036@@ -435,6 +498,9 @@ ia32_tracesys:
9037 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9038 movq %rsp,%rdi /* &pt_regs -> arg1 */
9039 call syscall_trace_enter
9040+
9041+ pax_erase_kstack
9042+
9043 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9044 RESTORE_REST
9045 cmpq $(IA32_NR_syscalls-1),%rax
9046diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9047index f6f5c53..b358b28 100644
9048--- a/arch/x86/ia32/sys_ia32.c
9049+++ b/arch/x86/ia32/sys_ia32.c
9050@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9051 */
9052 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9053 {
9054- typeof(ubuf->st_uid) uid = 0;
9055- typeof(ubuf->st_gid) gid = 0;
9056+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9057+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9058 SET_UID(uid, stat->uid);
9059 SET_GID(gid, stat->gid);
9060 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9061@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9062 }
9063 set_fs(KERNEL_DS);
9064 ret = sys_rt_sigprocmask(how,
9065- set ? (sigset_t __user *)&s : NULL,
9066- oset ? (sigset_t __user *)&s : NULL,
9067+ set ? (sigset_t __force_user *)&s : NULL,
9068+ oset ? (sigset_t __force_user *)&s : NULL,
9069 sigsetsize);
9070 set_fs(old_fs);
9071 if (ret)
9072@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9073 return alarm_setitimer(seconds);
9074 }
9075
9076-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9077+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9078 int options)
9079 {
9080 return compat_sys_wait4(pid, stat_addr, options, NULL);
9081@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9082 mm_segment_t old_fs = get_fs();
9083
9084 set_fs(KERNEL_DS);
9085- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9086+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9087 set_fs(old_fs);
9088 if (put_compat_timespec(&t, interval))
9089 return -EFAULT;
9090@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9091 mm_segment_t old_fs = get_fs();
9092
9093 set_fs(KERNEL_DS);
9094- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9095+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9096 set_fs(old_fs);
9097 if (!ret) {
9098 switch (_NSIG_WORDS) {
9099@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9100 if (copy_siginfo_from_user32(&info, uinfo))
9101 return -EFAULT;
9102 set_fs(KERNEL_DS);
9103- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9104+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9105 set_fs(old_fs);
9106 return ret;
9107 }
9108@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9109 return -EFAULT;
9110
9111 set_fs(KERNEL_DS);
9112- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9113+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9114 count);
9115 set_fs(old_fs);
9116
9117diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9118index 952bd01..7692c6f 100644
9119--- a/arch/x86/include/asm/alternative-asm.h
9120+++ b/arch/x86/include/asm/alternative-asm.h
9121@@ -15,6 +15,45 @@
9122 .endm
9123 #endif
9124
9125+#ifdef KERNEXEC_PLUGIN
9126+ .macro pax_force_retaddr_bts rip=0
9127+ btsq $63,\rip(%rsp)
9128+ .endm
9129+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9130+ .macro pax_force_retaddr rip=0, reload=0
9131+ btsq $63,\rip(%rsp)
9132+ .endm
9133+ .macro pax_force_fptr ptr
9134+ btsq $63,\ptr
9135+ .endm
9136+ .macro pax_set_fptr_mask
9137+ .endm
9138+#endif
9139+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9140+ .macro pax_force_retaddr rip=0, reload=0
9141+ .if \reload
9142+ pax_set_fptr_mask
9143+ .endif
9144+ orq %r10,\rip(%rsp)
9145+ .endm
9146+ .macro pax_force_fptr ptr
9147+ orq %r10,\ptr
9148+ .endm
9149+ .macro pax_set_fptr_mask
9150+ movabs $0x8000000000000000,%r10
9151+ .endm
9152+#endif
9153+#else
9154+ .macro pax_force_retaddr rip=0, reload=0
9155+ .endm
9156+ .macro pax_force_fptr ptr
9157+ .endm
9158+ .macro pax_force_retaddr_bts rip=0
9159+ .endm
9160+ .macro pax_set_fptr_mask
9161+ .endm
9162+#endif
9163+
9164 .macro altinstruction_entry orig alt feature orig_len alt_len
9165 .long \orig - .
9166 .long \alt - .
9167diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9168index 37ad100..7d47faa 100644
9169--- a/arch/x86/include/asm/alternative.h
9170+++ b/arch/x86/include/asm/alternative.h
9171@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9172 ".section .discard,\"aw\",@progbits\n" \
9173 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9174 ".previous\n" \
9175- ".section .altinstr_replacement, \"ax\"\n" \
9176+ ".section .altinstr_replacement, \"a\"\n" \
9177 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9178 ".previous"
9179
9180diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9181index 3ab9bdd..238033e 100644
9182--- a/arch/x86/include/asm/apic.h
9183+++ b/arch/x86/include/asm/apic.h
9184@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
9185
9186 #ifdef CONFIG_X86_LOCAL_APIC
9187
9188-extern unsigned int apic_verbosity;
9189+extern int apic_verbosity;
9190 extern int local_apic_timer_c2_ok;
9191
9192 extern int disable_apic;
9193diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9194index 20370c6..a2eb9b0 100644
9195--- a/arch/x86/include/asm/apm.h
9196+++ b/arch/x86/include/asm/apm.h
9197@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9198 __asm__ __volatile__(APM_DO_ZERO_SEGS
9199 "pushl %%edi\n\t"
9200 "pushl %%ebp\n\t"
9201- "lcall *%%cs:apm_bios_entry\n\t"
9202+ "lcall *%%ss:apm_bios_entry\n\t"
9203 "setc %%al\n\t"
9204 "popl %%ebp\n\t"
9205 "popl %%edi\n\t"
9206@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9207 __asm__ __volatile__(APM_DO_ZERO_SEGS
9208 "pushl %%edi\n\t"
9209 "pushl %%ebp\n\t"
9210- "lcall *%%cs:apm_bios_entry\n\t"
9211+ "lcall *%%ss:apm_bios_entry\n\t"
9212 "setc %%bl\n\t"
9213 "popl %%ebp\n\t"
9214 "popl %%edi\n\t"
9215diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9216index 58cb6d4..ca9010d 100644
9217--- a/arch/x86/include/asm/atomic.h
9218+++ b/arch/x86/include/asm/atomic.h
9219@@ -22,7 +22,18 @@
9220 */
9221 static inline int atomic_read(const atomic_t *v)
9222 {
9223- return (*(volatile int *)&(v)->counter);
9224+ return (*(volatile const int *)&(v)->counter);
9225+}
9226+
9227+/**
9228+ * atomic_read_unchecked - read atomic variable
9229+ * @v: pointer of type atomic_unchecked_t
9230+ *
9231+ * Atomically reads the value of @v.
9232+ */
9233+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9234+{
9235+ return (*(volatile const int *)&(v)->counter);
9236 }
9237
9238 /**
9239@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9240 }
9241
9242 /**
9243+ * atomic_set_unchecked - set atomic variable
9244+ * @v: pointer of type atomic_unchecked_t
9245+ * @i: required value
9246+ *
9247+ * Atomically sets the value of @v to @i.
9248+ */
9249+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9250+{
9251+ v->counter = i;
9252+}
9253+
9254+/**
9255 * atomic_add - add integer to atomic variable
9256 * @i: integer value to add
9257 * @v: pointer of type atomic_t
9258@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9259 */
9260 static inline void atomic_add(int i, atomic_t *v)
9261 {
9262- asm volatile(LOCK_PREFIX "addl %1,%0"
9263+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9264+
9265+#ifdef CONFIG_PAX_REFCOUNT
9266+ "jno 0f\n"
9267+ LOCK_PREFIX "subl %1,%0\n"
9268+ "int $4\n0:\n"
9269+ _ASM_EXTABLE(0b, 0b)
9270+#endif
9271+
9272+ : "+m" (v->counter)
9273+ : "ir" (i));
9274+}
9275+
9276+/**
9277+ * atomic_add_unchecked - add integer to atomic variable
9278+ * @i: integer value to add
9279+ * @v: pointer of type atomic_unchecked_t
9280+ *
9281+ * Atomically adds @i to @v.
9282+ */
9283+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9284+{
9285+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9286 : "+m" (v->counter)
9287 : "ir" (i));
9288 }
9289@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9290 */
9291 static inline void atomic_sub(int i, atomic_t *v)
9292 {
9293- asm volatile(LOCK_PREFIX "subl %1,%0"
9294+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9295+
9296+#ifdef CONFIG_PAX_REFCOUNT
9297+ "jno 0f\n"
9298+ LOCK_PREFIX "addl %1,%0\n"
9299+ "int $4\n0:\n"
9300+ _ASM_EXTABLE(0b, 0b)
9301+#endif
9302+
9303+ : "+m" (v->counter)
9304+ : "ir" (i));
9305+}
9306+
9307+/**
9308+ * atomic_sub_unchecked - subtract integer from atomic variable
9309+ * @i: integer value to subtract
9310+ * @v: pointer of type atomic_unchecked_t
9311+ *
9312+ * Atomically subtracts @i from @v.
9313+ */
9314+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9315+{
9316+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9317 : "+m" (v->counter)
9318 : "ir" (i));
9319 }
9320@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9321 {
9322 unsigned char c;
9323
9324- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9325+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9326+
9327+#ifdef CONFIG_PAX_REFCOUNT
9328+ "jno 0f\n"
9329+ LOCK_PREFIX "addl %2,%0\n"
9330+ "int $4\n0:\n"
9331+ _ASM_EXTABLE(0b, 0b)
9332+#endif
9333+
9334+ "sete %1\n"
9335 : "+m" (v->counter), "=qm" (c)
9336 : "ir" (i) : "memory");
58c5fc13 9337 return c;
9338@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9339 */
9340 static inline void atomic_inc(atomic_t *v)
9341 {
9342- asm volatile(LOCK_PREFIX "incl %0"
9343+ asm volatile(LOCK_PREFIX "incl %0\n"
9344+
9345+#ifdef CONFIG_PAX_REFCOUNT
9346+ "jno 0f\n"
9347+ LOCK_PREFIX "decl %0\n"
9348+ "int $4\n0:\n"
9349+ _ASM_EXTABLE(0b, 0b)
9350+#endif
9351+
9352+ : "+m" (v->counter));
9353+}
9354+
9355+/**
9356+ * atomic_inc_unchecked - increment atomic variable
9357+ * @v: pointer of type atomic_unchecked_t
9358+ *
9359+ * Atomically increments @v by 1.
9360+ */
9361+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9362+{
9363+ asm volatile(LOCK_PREFIX "incl %0\n"
9364 : "+m" (v->counter));
9365 }
9366
9367@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9368 */
9369 static inline void atomic_dec(atomic_t *v)
9370 {
9371- asm volatile(LOCK_PREFIX "decl %0"
9372+ asm volatile(LOCK_PREFIX "decl %0\n"
9373+
9374+#ifdef CONFIG_PAX_REFCOUNT
9375+ "jno 0f\n"
9376+ LOCK_PREFIX "incl %0\n"
9377+ "int $4\n0:\n"
9378+ _ASM_EXTABLE(0b, 0b)
9379+#endif
9380+
9381+ : "+m" (v->counter));
9382+}
9383+
9384+/**
9385+ * atomic_dec_unchecked - decrement atomic variable
9386+ * @v: pointer of type atomic_unchecked_t
9387+ *
9388+ * Atomically decrements @v by 1.
9389+ */
9390+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9391+{
9392+ asm volatile(LOCK_PREFIX "decl %0\n"
9393 : "+m" (v->counter));
9394 }
9395
9396@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9397 {
9398 unsigned char c;
9399
9400- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9401+ asm volatile(LOCK_PREFIX "decl %0\n"
9402+
9403+#ifdef CONFIG_PAX_REFCOUNT
9404+ "jno 0f\n"
9405+ LOCK_PREFIX "incl %0\n"
9406+ "int $4\n0:\n"
9407+ _ASM_EXTABLE(0b, 0b)
9408+#endif
9409+
9410+ "sete %1\n"
9411 : "+m" (v->counter), "=qm" (c)
9412 : : "memory");
58c5fc13 9413 return c != 0;
9414@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9415 {
9416 unsigned char c;
9417
9418- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9419+ asm volatile(LOCK_PREFIX "incl %0\n"
9420+
9421+#ifdef CONFIG_PAX_REFCOUNT
9422+ "jno 0f\n"
9423+ LOCK_PREFIX "decl %0\n"
9424+ "int $4\n0:\n"
9425+ _ASM_EXTABLE(0b, 0b)
9426+#endif
9427+
9428+ "sete %1\n"
9429+ : "+m" (v->counter), "=qm" (c)
9430+ : : "memory");
9431+ return c != 0;
9432+}
9433+
9434+/**
9435+ * atomic_inc_and_test_unchecked - increment and test
9436+ * @v: pointer of type atomic_unchecked_t
9437+ *
9438+ * Atomically increments @v by 1
9439+ * and returns true if the result is zero, or false for all
9440+ * other cases.
9441+ */
9442+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9443+{
9444+ unsigned char c;
9445+
9446+ asm volatile(LOCK_PREFIX "incl %0\n"
9447+ "sete %1\n"
9448 : "+m" (v->counter), "=qm" (c)
9449 : : "memory");
58c5fc13 9450 return c != 0;
9451@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9452 {
9453 unsigned char c;
9454
9455- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9456+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9457+
9458+#ifdef CONFIG_PAX_REFCOUNT
9459+ "jno 0f\n"
9460+ LOCK_PREFIX "subl %2,%0\n"
9461+ "int $4\n0:\n"
9462+ _ASM_EXTABLE(0b, 0b)
9463+#endif
9464+
9465+ "sets %1\n"
9466 : "+m" (v->counter), "=qm" (c)
9467 : "ir" (i) : "memory");
58c5fc13 9468 return c;
9469@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9470 goto no_xadd;
9471 #endif
9472 /* Modern 486+ processor */
9473- return i + xadd(&v->counter, i);
9474+ return i + xadd_check_overflow(&v->counter, i);
9475
9476 #ifdef CONFIG_M386
9477 no_xadd: /* Legacy 386 processor */
9478@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9479 }
9480
9481 /**
9482+ * atomic_add_return_unchecked - add integer and return
9483+ * @i: integer value to add
9484+ * @v: pointer of type atomic_unchecked_t
9485+ *
9486+ * Atomically adds @i to @v and returns @i + @v
9487+ */
9488+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9489+{
9490+#ifdef CONFIG_M386
9491+ int __i;
9492+ unsigned long flags;
9493+ if (unlikely(boot_cpu_data.x86 <= 3))
9494+ goto no_xadd;
9495+#endif
9496+ /* Modern 486+ processor */
9497+ return i + xadd(&v->counter, i);
9498+
9499+#ifdef CONFIG_M386
9500+no_xadd: /* Legacy 386 processor */
9501+ raw_local_irq_save(flags);
9502+ __i = atomic_read_unchecked(v);
9503+ atomic_set_unchecked(v, i + __i);
9504+ raw_local_irq_restore(flags);
9505+ return i + __i;
9506+#endif
9507+}
9508+
9509+/**
9510 * atomic_sub_return - subtract integer and return
9511 * @v: pointer of type atomic_t
9512 * @i: integer value to subtract
9513@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9514 }
9515
9516 #define atomic_inc_return(v) (atomic_add_return(1, v))
9517+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9518+{
9519+ return atomic_add_return_unchecked(1, v);
9520+}
9521 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9522
9523 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9524@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9525 return cmpxchg(&v->counter, old, new);
9526 }
9527
9528+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9529+{
9530+ return cmpxchg(&v->counter, old, new);
9531+}
9532+
9533 static inline int atomic_xchg(atomic_t *v, int new)
9534 {
9535 return xchg(&v->counter, new);
9536 }
9537
9538+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9539+{
9540+ return xchg(&v->counter, new);
9541+}
9542+
9543 /**
9544 * __atomic_add_unless - add unless the number is already a given value
9545 * @v: pointer of type atomic_t
9546@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9547 */
9548 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9549 {
9550- int c, old;
9551+ int c, old, new;
9552 c = atomic_read(v);
9553 for (;;) {
9554- if (unlikely(c == (u)))
9555+ if (unlikely(c == u))
9556 break;
9557- old = atomic_cmpxchg((v), c, c + (a));
9558+
9559+ asm volatile("addl %2,%0\n"
9560+
9561+#ifdef CONFIG_PAX_REFCOUNT
9562+ "jno 0f\n"
9563+ "subl %2,%0\n"
9564+ "int $4\n0:\n"
9565+ _ASM_EXTABLE(0b, 0b)
9566+#endif
9567+
9568+ : "=r" (new)
9569+ : "0" (c), "ir" (a));
9570+
9571+ old = atomic_cmpxchg(v, c, new);
9572 if (likely(old == c))
9573 break;
9574 c = old;
9575@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9576 return c;
9577 }
9578
9579+/**
9580+ * atomic_inc_not_zero_hint - increment if not null
9581+ * @v: pointer of type atomic_t
9582+ * @hint: probable value of the atomic before the increment
9583+ *
9584+ * This version of atomic_inc_not_zero() gives a hint of probable
9585+ * value of the atomic. This helps processor to not read the memory
9586+ * before doing the atomic read/modify/write cycle, lowering
9587+ * number of bus transactions on some arches.
9588+ *
9589+ * Returns: 0 if increment was not done, 1 otherwise.
9590+ */
9591+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9592+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9593+{
9594+ int val, c = hint, new;
9595+
9596+ /* sanity test, should be removed by compiler if hint is a constant */
9597+ if (!hint)
9598+ return __atomic_add_unless(v, 1, 0);
9599+
9600+ do {
9601+ asm volatile("incl %0\n"
9602+
9603+#ifdef CONFIG_PAX_REFCOUNT
9604+ "jno 0f\n"
9605+ "decl %0\n"
9606+ "int $4\n0:\n"
9607+ _ASM_EXTABLE(0b, 0b)
9608+#endif
9609+
9610+ : "=r" (new)
9611+ : "0" (c));
9612+
9613+ val = atomic_cmpxchg(v, c, new);
9614+ if (val == c)
9615+ return 1;
9616+ c = val;
9617+ } while (c);
9618+
9619+ return 0;
9620+}
9621
9622 /*
9623 * atomic_dec_if_positive - decrement by 1 if old value positive
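[Editor's note - illustrative sketch, not part of the patch.] The atomic.h hunks above repeatedly wrap an arithmetic instruction in the same overflow guard: the operation is issued, "jno" skips the fixup when the overflow flag is clear, otherwise the operation is rolled back and "int $4" raises a trap that the _ASM_EXTABLE entry turns into a PAX_REFCOUNT report, while the *_unchecked_t variants keep the plain instruction for counters that are allowed to wrap. A minimal user-space sketch of the same detect-and-roll-back idea, assuming x86 GCC-style inline asm; it reports instead of trapping and omits the LOCK prefix, so it is not the patch's actual atomic code:

#include <stdio.h>

/* Add i to *counter; if the signed add overflows, undo it and flag the event. */
static int add_check_overflow(int *counter, int i, int *overflowed)
{
        int of = 0;
        asm volatile("addl %2, %0\n\t"
                     "jno 1f\n\t"          /* overflow flag clear: keep the result */
                     "subl %2, %0\n\t"     /* overflow: roll the addition back     */
                     "movl $1, %1\n"
                     "1:\n"
                     : "+m" (*counter), "+r" (of)
                     : "ir" (i)
                     : "cc");
        *overflowed = of;
        return *counter;
}

int main(void)
{
        int c = 0x7fffffff, of = 0;
        add_check_overflow(&c, 1, &of);
        printf("counter=%d overflowed=%d\n", c, of);   /* counter stays at INT_MAX, overflowed=1 */
        return 0;
}

In the patch itself the fixup path ends in "int $4" so the kernel's overflow trap handler can log the refcount overflow and keep the counter saturated instead of letting it wrap.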
9624diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
5e856224 9625index fa13f0e..27c2e08 100644
fe2de317
MT
9626--- a/arch/x86/include/asm/atomic64_32.h
9627+++ b/arch/x86/include/asm/atomic64_32.h
9628@@ -12,6 +12,14 @@ typedef struct {
9629 u64 __aligned(8) counter;
9630 } atomic64_t;
9631
9632+#ifdef CONFIG_PAX_REFCOUNT
9633+typedef struct {
9634+ u64 __aligned(8) counter;
9635+} atomic64_unchecked_t;
9636+#else
9637+typedef atomic64_t atomic64_unchecked_t;
9638+#endif
9639+
9640 #define ATOMIC64_INIT(val) { (val) }
9641
9642 #ifdef CONFIG_X86_CMPXCHG64
9643@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9644 }
9645
9646 /**
9647+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9648+ * @p: pointer to type atomic64_unchecked_t
9649+ * @o: expected value
9650+ * @n: new value
9651+ *
9652+ * Atomically sets @v to @n if it was equal to @o and returns
9653+ * the old value.
9654+ */
9655+
9656+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9657+{
9658+ return cmpxchg64(&v->counter, o, n);
9659+}
9660+
9661+/**
9662 * atomic64_xchg - xchg atomic64 variable
9663 * @v: pointer to type atomic64_t
9664 * @n: value to assign
9665@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9666 }
9667
9668 /**
9669+ * atomic64_set_unchecked - set atomic64 variable
9670+ * @v: pointer to type atomic64_unchecked_t
9671+ * @n: value to assign
9672+ *
9673+ * Atomically sets the value of @v to @n.
9674+ */
9675+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9676+{
9677+ unsigned high = (unsigned)(i >> 32);
9678+ unsigned low = (unsigned)i;
9679+ asm volatile(ATOMIC64_ALTERNATIVE(set)
9680+ : "+b" (low), "+c" (high)
9681+ : "S" (v)
9682+ : "eax", "edx", "memory"
9683+ );
9684+}
9685+
9686+/**
9687 * atomic64_read - read atomic64 variable
9688 * @v: pointer to type atomic64_t
9689 *
5e856224 9690@@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
fe2de317
MT
9691 }
9692
9693 /**
9694+ * atomic64_read_unchecked - read atomic64 variable
9695+ * @v: pointer to type atomic64_unchecked_t
9696+ *
9697+ * Atomically reads the value of @v and returns it.
9698+ */
9699+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9700+{
9701+ long long r;
9702+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9703+ : "=A" (r), "+c" (v)
9704+ : : "memory"
9705+ );
9706+ return r;
9707+ }
9708+
9709+/**
9710 * atomic64_add_return - add and return
9711 * @i: integer value to add
9712 * @v: pointer to type atomic64_t
9713@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9714 return i;
9715 }
9716
9717+/**
9718+ * atomic64_add_return_unchecked - add and return
9719+ * @i: integer value to add
9720+ * @v: pointer to type atomic64_unchecked_t
9721+ *
9722+ * Atomically adds @i to @v and returns @i + *@v
9723+ */
9724+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9725+{
9726+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9727+ : "+A" (i), "+c" (v)
9728+ : : "memory"
9729+ );
9730+ return i;
9731+}
9732+
9733 /*
9734 * Other variants with different arithmetic operators:
bc901d79 9735 */
fe2de317
MT
9736@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9737 return a;
9738 }
9739
9740+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9741+{
9742+ long long a;
9743+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9744+ : "=A" (a)
9745+ : "S" (v)
9746+ : "memory", "ecx"
9747+ );
9748+ return a;
9749+}
9750+
9751 static inline long long atomic64_dec_return(atomic64_t *v)
bc901d79 9752 {
fe2de317
MT
9753 long long a;
9754@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9755 }
9756
9757 /**
9758+ * atomic64_add_unchecked - add integer to atomic64 variable
9759+ * @i: integer value to add
9760+ * @v: pointer to type atomic64_unchecked_t
9761+ *
9762+ * Atomically adds @i to @v.
9763+ */
9764+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9765+{
9766+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9767+ : "+A" (i), "+c" (v)
9768+ : : "memory"
9769+ );
9770+ return i;
bc901d79
MT
9771+}
9772+
9773+/**
fe2de317
MT
9774 * atomic64_sub - subtract the atomic64 variable
9775 * @i: integer value to subtract
9776 * @v: pointer to type atomic64_t
9777diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
4c928ab7 9778index 0e1cbfc..5623683 100644
fe2de317
MT
9779--- a/arch/x86/include/asm/atomic64_64.h
9780+++ b/arch/x86/include/asm/atomic64_64.h
9781@@ -18,7 +18,19 @@
9782 */
9783 static inline long atomic64_read(const atomic64_t *v)
9784 {
9785- return (*(volatile long *)&(v)->counter);
9786+ return (*(volatile const long *)&(v)->counter);
9787+}
9788+
9789+/**
9790+ * atomic64_read_unchecked - read atomic64 variable
9791+ * @v: pointer of type atomic64_unchecked_t
57199397
MT
9792+ *
9793+ * Atomically reads the value of @v.
fe2de317 9794+ * Doesn't imply a read memory barrier.
57199397 9795+ */
fe2de317 9796+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
57199397 9797+{
fe2de317 9798+ return (*(volatile const long *)&(v)->counter);
bc901d79
MT
9799 }
9800
9801 /**
fe2de317 9802@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
57199397
MT
9803 }
9804
9805 /**
fe2de317
MT
9806+ * atomic64_set_unchecked - set atomic64 variable
9807+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9808+ * @i: required value
9809+ *
9810+ * Atomically sets the value of @v to @i.
9811+ */
fe2de317 9812+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
57199397
MT
9813+{
9814+ v->counter = i;
9815+}
9816+
9817+/**
fe2de317 9818 * atomic64_add - add integer to atomic64 variable
57199397 9819 * @i: integer value to add
fe2de317
MT
9820 * @v: pointer to type atomic64_t
9821@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
57199397 9822 */
fe2de317 9823 static inline void atomic64_add(long i, atomic64_t *v)
57199397 9824 {
fe2de317 9825+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
57199397
MT
9826+
9827+#ifdef CONFIG_PAX_REFCOUNT
9828+ "jno 0f\n"
fe2de317 9829+ LOCK_PREFIX "subq %1,%0\n"
bc901d79 9830+ "int $4\n0:\n"
57199397
MT
9831+ _ASM_EXTABLE(0b, 0b)
9832+#endif
9833+
fe2de317
MT
9834+ : "=m" (v->counter)
9835+ : "er" (i), "m" (v->counter));
57199397
MT
9836+}
9837+
9838+/**
fe2de317 9839+ * atomic64_add_unchecked - add integer to atomic64 variable
57199397 9840+ * @i: integer value to add
fe2de317 9841+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9842+ *
9843+ * Atomically adds @i to @v.
9844+ */
fe2de317 9845+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
57199397 9846+{
fe2de317
MT
9847 asm volatile(LOCK_PREFIX "addq %1,%0"
9848 : "=m" (v->counter)
9849 : "er" (i), "m" (v->counter));
9850@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
57199397 9851 */
fe2de317 9852 static inline void atomic64_sub(long i, atomic64_t *v)
57199397 9853 {
fe2de317
MT
9854- asm volatile(LOCK_PREFIX "subq %1,%0"
9855+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
57199397
MT
9856+
9857+#ifdef CONFIG_PAX_REFCOUNT
9858+ "jno 0f\n"
fe2de317 9859+ LOCK_PREFIX "addq %1,%0\n"
bc901d79 9860+ "int $4\n0:\n"
57199397
MT
9861+ _ASM_EXTABLE(0b, 0b)
9862+#endif
9863+
fe2de317
MT
9864+ : "=m" (v->counter)
9865+ : "er" (i), "m" (v->counter));
57199397
MT
9866+}
9867+
9868+/**
fe2de317 9869+ * atomic64_sub_unchecked - subtract the atomic64 variable
57199397 9870+ * @i: integer value to subtract
fe2de317 9871+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9872+ *
9873+ * Atomically subtracts @i from @v.
9874+ */
fe2de317 9875+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
57199397 9876+{
fe2de317
MT
9877+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9878 : "=m" (v->counter)
9879 : "er" (i), "m" (v->counter));
57199397 9880 }
fe2de317 9881@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
57199397
MT
9882 {
9883 unsigned char c;
9884
fe2de317
MT
9885- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9886+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
57199397
MT
9887+
9888+#ifdef CONFIG_PAX_REFCOUNT
9889+ "jno 0f\n"
fe2de317 9890+ LOCK_PREFIX "addq %2,%0\n"
bc901d79 9891+ "int $4\n0:\n"
57199397
MT
9892+ _ASM_EXTABLE(0b, 0b)
9893+#endif
9894+
9895+ "sete %1\n"
fe2de317
MT
9896 : "=m" (v->counter), "=qm" (c)
9897 : "er" (i), "m" (v->counter) : "memory");
57199397 9898 return c;
fe2de317 9899@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
57199397 9900 */
fe2de317 9901 static inline void atomic64_inc(atomic64_t *v)
57199397 9902 {
fe2de317 9903+ asm volatile(LOCK_PREFIX "incq %0\n"
57199397
MT
9904+
9905+#ifdef CONFIG_PAX_REFCOUNT
9906+ "jno 0f\n"
fe2de317 9907+ LOCK_PREFIX "decq %0\n"
bc901d79 9908+ "int $4\n0:\n"
57199397
MT
9909+ _ASM_EXTABLE(0b, 0b)
9910+#endif
9911+
fe2de317
MT
9912+ : "=m" (v->counter)
9913+ : "m" (v->counter));
57199397
MT
9914+}
9915+
9916+/**
fe2de317
MT
9917+ * atomic64_inc_unchecked - increment atomic64 variable
9918+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9919+ *
9920+ * Atomically increments @v by 1.
9921+ */
fe2de317 9922+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
57199397 9923+{
fe2de317
MT
9924 asm volatile(LOCK_PREFIX "incq %0"
9925 : "=m" (v->counter)
9926 : "m" (v->counter));
9927@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
57199397 9928 */
fe2de317 9929 static inline void atomic64_dec(atomic64_t *v)
57199397 9930 {
fe2de317
MT
9931- asm volatile(LOCK_PREFIX "decq %0"
9932+ asm volatile(LOCK_PREFIX "decq %0\n"
57199397
MT
9933+
9934+#ifdef CONFIG_PAX_REFCOUNT
9935+ "jno 0f\n"
fe2de317 9936+ LOCK_PREFIX "incq %0\n"
bc901d79 9937+ "int $4\n0:\n"
57199397
MT
9938+ _ASM_EXTABLE(0b, 0b)
9939+#endif
9940+
fe2de317
MT
9941+ : "=m" (v->counter)
9942+ : "m" (v->counter));
57199397
MT
9943+}
9944+
9945+/**
fe2de317
MT
9946+ * atomic64_dec_unchecked - decrement atomic64 variable
9947+ * @v: pointer to type atomic64_t
57199397
MT
9948+ *
9949+ * Atomically decrements @v by 1.
9950+ */
fe2de317 9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
57199397 9952+{
fe2de317
MT
9953+ asm volatile(LOCK_PREFIX "decq %0\n"
9954 : "=m" (v->counter)
9955 : "m" (v->counter));
57199397 9956 }
fe2de317 9957@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
57199397
MT
9958 {
9959 unsigned char c;
9960
fe2de317
MT
9961- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9962+ asm volatile(LOCK_PREFIX "decq %0\n"
57199397
MT
9963+
9964+#ifdef CONFIG_PAX_REFCOUNT
9965+ "jno 0f\n"
fe2de317 9966+ LOCK_PREFIX "incq %0\n"
bc901d79 9967+ "int $4\n0:\n"
57199397
MT
9968+ _ASM_EXTABLE(0b, 0b)
9969+#endif
9970+
9971+ "sete %1\n"
fe2de317
MT
9972 : "=m" (v->counter), "=qm" (c)
9973 : "m" (v->counter) : "memory");
57199397 9974 return c != 0;
fe2de317 9975@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
57199397
MT
9976 {
9977 unsigned char c;
9978
fe2de317
MT
9979- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9980+ asm volatile(LOCK_PREFIX "incq %0\n"
57199397
MT
9981+
9982+#ifdef CONFIG_PAX_REFCOUNT
9983+ "jno 0f\n"
fe2de317 9984+ LOCK_PREFIX "decq %0\n"
bc901d79 9985+ "int $4\n0:\n"
57199397
MT
9986+ _ASM_EXTABLE(0b, 0b)
9987+#endif
9988+
8308f9c9 9989+ "sete %1\n"
fe2de317
MT
9990 : "=m" (v->counter), "=qm" (c)
9991 : "m" (v->counter) : "memory");
57199397 9992 return c != 0;
fe2de317 9993@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
57199397
MT
9994 {
9995 unsigned char c;
9996
fe2de317
MT
9997- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9998+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
57199397
MT
9999+
10000+#ifdef CONFIG_PAX_REFCOUNT
10001+ "jno 0f\n"
fe2de317 10002+ LOCK_PREFIX "subq %2,%0\n"
bc901d79 10003+ "int $4\n0:\n"
57199397
MT
10004+ _ASM_EXTABLE(0b, 0b)
10005+#endif
10006+
10007+ "sets %1\n"
fe2de317
MT
10008 : "=m" (v->counter), "=qm" (c)
10009 : "er" (i), "m" (v->counter) : "memory");
57199397 10010 return c;
4c928ab7
MT
10011@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10012 */
fe2de317
MT
10013 static inline long atomic64_add_return(long i, atomic64_t *v)
10014 {
4c928ab7 10015+ return i + xadd_check_overflow(&v->counter, i);
57199397
MT
10016+}
10017+
10018+/**
fe2de317 10019+ * atomic64_add_return_unchecked - add and return
57199397 10020+ * @i: integer value to add
fe2de317 10021+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
10022+ *
10023+ * Atomically adds @i to @v and returns @i + @v
10024+ */
fe2de317 10025+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
57199397 10026+{
4c928ab7
MT
10027 return i + xadd(&v->counter, i);
10028 }
10029
10030@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
57199397
MT
10031 }
10032
fe2de317
MT
10033 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10034+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
57199397 10035+{
fe2de317 10036+ return atomic64_add_return_unchecked(1, v);
57199397 10037+}
fe2de317 10038 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
57199397 10039
fe2de317 10040 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
4c928ab7 10041@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8308f9c9
MT
10042 return cmpxchg(&v->counter, old, new);
10043 }
10044
fe2de317 10045+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8308f9c9
MT
10046+{
10047+ return cmpxchg(&v->counter, old, new);
10048+}
10049+
fe2de317 10050 static inline long atomic64_xchg(atomic64_t *v, long new)
8308f9c9
MT
10051 {
10052 return xchg(&v->counter, new);
4c928ab7 10053@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
57199397 10054 */
fe2de317 10055 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
57199397 10056 {
fe2de317
MT
10057- long c, old;
10058+ long c, old, new;
10059 c = atomic64_read(v);
57199397
MT
10060 for (;;) {
10061- if (unlikely(c == (u)))
10062+ if (unlikely(c == u))
10063 break;
fe2de317 10064- old = atomic64_cmpxchg((v), c, c + (a));
57199397 10065+
fe2de317 10066+ asm volatile("add %2,%0\n"
57199397
MT
10067+
10068+#ifdef CONFIG_PAX_REFCOUNT
10069+ "jno 0f\n"
fe2de317 10070+ "sub %2,%0\n"
bc901d79 10071+ "int $4\n0:\n"
57199397
MT
10072+ _ASM_EXTABLE(0b, 0b)
10073+#endif
10074+
10075+ : "=r" (new)
10076+ : "0" (c), "ir" (a));
10077+
fe2de317 10078+ old = atomic64_cmpxchg(v, c, new);
57199397
MT
10079 if (likely(old == c))
10080 break;
10081 c = old;
fe2de317
MT
10082 }
10083- return c != (u);
10084+ return c != u;
57199397
MT
10085 }
10086
fe2de317
MT
10087 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10088diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
5e856224 10089index b97596e..9bd48b06 100644
fe2de317
MT
10090--- a/arch/x86/include/asm/bitops.h
10091+++ b/arch/x86/include/asm/bitops.h
bc901d79
MT
10092@@ -38,7 +38,7 @@
10093 * a mask operation on a byte.
10094 */
10095 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10096-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10097+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10098 #define CONST_MASK(nr) (1 << ((nr) & 7))
10099
10100 /**
fe2de317
MT
10101diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10102index 5e1a2ee..c9f9533 100644
10103--- a/arch/x86/include/asm/boot.h
10104+++ b/arch/x86/include/asm/boot.h
efbe55a5
MT
10105@@ -11,10 +11,15 @@
10106 #include <asm/pgtable_types.h>
df50ba0c 10107
efbe55a5
MT
10108 /* Physical address where kernel should be loaded. */
10109-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10110+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10111 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10112 & ~(CONFIG_PHYSICAL_ALIGN - 1))
df50ba0c 10113
efbe55a5
MT
10114+#ifndef __ASSEMBLY__
10115+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10116+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
df50ba0c
MT
10117+#endif
10118+
efbe55a5
MT
10119 /* Minimum kernel alignment, as a power of two */
10120 #ifdef CONFIG_X86_64
10121 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
fe2de317
MT
10122diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10123index 48f99f1..d78ebf9 100644
10124--- a/arch/x86/include/asm/cache.h
10125+++ b/arch/x86/include/asm/cache.h
8308f9c9
MT
10126@@ -5,12 +5,13 @@
10127
10128 /* L1 cache line size */
10129 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10130-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15a11c5b 10131+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
efbe55a5 10132
57199397
MT
10133 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10134+#define __read_only __attribute__((__section__(".data..read_only")))
efbe55a5
MT
10135
10136 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8308f9c9 10137-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15a11c5b 10138+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8308f9c9
MT
10139
10140 #ifdef CONFIG_X86_VSMP
10141 #ifdef CONFIG_SMP
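[Editor's note - illustrative sketch, not part of the patch.] The cache.h hunk introduces a __read_only attribute that places objects in a dedicated ".data..read_only" section alongside the existing __read_mostly. A small sketch of how such a section attribute is used, assuming GCC/Clang on an ELF target; the attribute only names the section, and actually write-protecting it is done by the kernel's linker script and paging, which this sketch does not reproduce:

#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

/* Initialised once at startup, then never expected to change. */
static int boot_mode __read_only = 1;

int main(void)
{
        printf("boot_mode=%d (stored in .data..read_only)\n", boot_mode);
        return 0;
}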
fe2de317
MT
10142diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10143index 4e12668..501d239 100644
10144--- a/arch/x86/include/asm/cacheflush.h
10145+++ b/arch/x86/include/asm/cacheflush.h
10146@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10147 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10148
10149 if (pg_flags == _PGMT_DEFAULT)
10150- return -1;
10151+ return ~0UL;
10152 else if (pg_flags == _PGMT_WC)
10153 return _PAGE_CACHE_WC;
10154 else if (pg_flags == _PGMT_UC_MINUS)
10155diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10156index 46fc474..b02b0f9 100644
10157--- a/arch/x86/include/asm/checksum_32.h
10158+++ b/arch/x86/include/asm/checksum_32.h
10159@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
efbe55a5
MT
10160 int len, __wsum sum,
10161 int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10162
efbe55a5
MT
10163+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10164+ int len, __wsum sum,
10165+ int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10166+
efbe55a5
MT
10167+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10168+ int len, __wsum sum,
10169+ int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10170+
efbe55a5
MT
10171 /*
10172 * Note: when you get a NULL pointer exception here this means someone
10173 * passed in an incorrect kernel address to one of these functions.
fe2de317 10174@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
efbe55a5 10175 int *err_ptr)
df50ba0c 10176 {
efbe55a5
MT
10177 might_sleep();
10178- return csum_partial_copy_generic((__force void *)src, dst,
10179+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10180 len, sum, err_ptr, NULL);
58c5fc13
MT
10181 }
10182
fe2de317 10183@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
58c5fc13
MT
10184 {
10185 might_sleep();
10186 if (access_ok(VERIFY_WRITE, dst, len))
10187- return csum_partial_copy_generic(src, (__force void *)dst,
10188+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10189 len, sum, NULL, err_ptr);
10190
10191 if (len)
4c928ab7 10192diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
5e856224 10193index 99480e5..d81165b 100644
4c928ab7
MT
10194--- a/arch/x86/include/asm/cmpxchg.h
10195+++ b/arch/x86/include/asm/cmpxchg.h
5e856224 10196@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
4c928ab7
MT
10197 __compiletime_error("Bad argument size for cmpxchg");
10198 extern void __xadd_wrong_size(void)
10199 __compiletime_error("Bad argument size for xadd");
10200+extern void __xadd_check_overflow_wrong_size(void)
10201+ __compiletime_error("Bad argument size for xadd_check_overflow");
5e856224
MT
10202 extern void __add_wrong_size(void)
10203 __compiletime_error("Bad argument size for add");
10204+extern void __add_check_overflow_wrong_size(void)
10205+ __compiletime_error("Bad argument size for add_check_overflow");
4c928ab7
MT
10206
10207 /*
10208 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
5e856224 10209@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
4c928ab7
MT
10210 __ret; \
10211 })
10212
5e856224 10213+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
4c928ab7 10214+ ({ \
5e856224 10215+ __typeof__ (*(ptr)) __ret = (arg); \
4c928ab7
MT
10216+ switch (sizeof(*(ptr))) { \
10217+ case __X86_CASE_L: \
5e856224 10218+ asm volatile (lock #op "l %0, %1\n" \
4c928ab7
MT
10219+ "jno 0f\n" \
10220+ "mov %0,%1\n" \
10221+ "int $4\n0:\n" \
10222+ _ASM_EXTABLE(0b, 0b) \
10223+ : "+r" (__ret), "+m" (*(ptr)) \
10224+ : : "memory", "cc"); \
10225+ break; \
10226+ case __X86_CASE_Q: \
5e856224 10227+ asm volatile (lock #op "q %q0, %1\n" \
4c928ab7
MT
10228+ "jno 0f\n" \
10229+ "mov %0,%1\n" \
10230+ "int $4\n0:\n" \
10231+ _ASM_EXTABLE(0b, 0b) \
10232+ : "+r" (__ret), "+m" (*(ptr)) \
10233+ : : "memory", "cc"); \
10234+ break; \
10235+ default: \
5e856224 10236+ __ ## op ## _check_overflow_wrong_size(); \
4c928ab7
MT
10237+ } \
10238+ __ret; \
10239+ })
10240+
10241 /*
5e856224
MT
10242 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10243 * Since this is generally used to protect other memory information, we
10244@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
4c928ab7
MT
10245 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10246 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10247
5e856224
MT
10248+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10249+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
4c928ab7 10250+
5e856224
MT
10251 #define __add(ptr, inc, lock) \
10252 ({ \
10253 __typeof__ (*(ptr)) __ret = (inc); \
fe2de317 10254diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
5e856224 10255index 8d67d42..183d0eb 100644
fe2de317
MT
10256--- a/arch/x86/include/asm/cpufeature.h
10257+++ b/arch/x86/include/asm/cpufeature.h
5e856224 10258@@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
6892158b
MT
10259 ".section .discard,\"aw\",@progbits\n"
10260 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
57199397
MT
10261 ".previous\n"
10262- ".section .altinstr_replacement,\"ax\"\n"
10263+ ".section .altinstr_replacement,\"a\"\n"
10264 "3: movb $1,%0\n"
10265 "4:\n"
10266 ".previous\n"
fe2de317 10267diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
5e856224 10268index e95822d..a90010e 100644
fe2de317
MT
10269--- a/arch/x86/include/asm/desc.h
10270+++ b/arch/x86/include/asm/desc.h
ae4e228f
MT
10271@@ -4,6 +4,7 @@
10272 #include <asm/desc_defs.h>
10273 #include <asm/ldt.h>
10274 #include <asm/mmu.h>
10275+#include <asm/pgtable.h>
15a11c5b 10276
ae4e228f
MT
10277 #include <linux/smp.h>
10278
fe2de317 10279@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15a11c5b
MT
10280
10281 desc->type = (info->read_exec_only ^ 1) << 1;
10282 desc->type |= info->contents << 2;
10283+ desc->type |= info->seg_not_present ^ 1;
10284
10285 desc->s = 1;
10286 desc->dpl = 0x3;
5e856224 10287@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
58c5fc13
MT
10288 }
10289
10290 extern struct desc_ptr idt_descr;
10291-extern gate_desc idt_table[];
5e856224
MT
10292 extern struct desc_ptr nmi_idt_descr;
10293-extern gate_desc nmi_idt_table[];
58c5fc13
MT
10294-
10295-struct gdt_page {
10296- struct desc_struct gdt[GDT_ENTRIES];
10297-} __attribute__((aligned(PAGE_SIZE)));
15a11c5b 10298-
58c5fc13
MT
10299-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10300+extern gate_desc idt_table[256];
5e856224 10301+extern gate_desc nmi_idt_table[256];
58c5fc13
MT
10302
10303+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10304 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10305 {
10306- return per_cpu(gdt_page, cpu).gdt;
10307+ return cpu_gdt_table[cpu];
10308 }
10309
10310 #ifdef CONFIG_X86_64
5e856224 10311@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
317566c1
MT
10312 unsigned long base, unsigned dpl, unsigned flags,
10313 unsigned short seg)
10314 {
10315- gate->a = (seg << 16) | (base & 0xffff);
15a11c5b
MT
10316- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10317+ gate->gate.offset_low = base;
10318+ gate->gate.seg = seg;
10319+ gate->gate.reserved = 0;
10320+ gate->gate.type = type;
10321+ gate->gate.s = 0;
10322+ gate->gate.dpl = dpl;
10323+ gate->gate.p = 1;
10324+ gate->gate.offset_high = base >> 16;
317566c1
MT
10325 }
10326
10327 #endif
5e856224 10328@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15a11c5b
MT
10329
10330 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
58c5fc13 10331 {
ae4e228f 10332+ pax_open_kernel();
58c5fc13 10333 memcpy(&idt[entry], gate, sizeof(*gate));
ae4e228f 10334+ pax_close_kernel();
58c5fc13
MT
10335 }
10336
15a11c5b 10337 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
58c5fc13 10338 {
ae4e228f 10339+ pax_open_kernel();
58c5fc13 10340 memcpy(&ldt[entry], desc, 8);
ae4e228f 10341+ pax_close_kernel();
58c5fc13
MT
10342 }
10343
15a11c5b 10344 static inline void
5e856224 10345@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15a11c5b 10346 default: size = sizeof(*gdt); break;
58c5fc13 10347 }
15a11c5b 10348
ae4e228f 10349+ pax_open_kernel();
58c5fc13 10350 memcpy(&gdt[entry], desc, size);
ae4e228f 10351+ pax_close_kernel();
58c5fc13
MT
10352 }
10353
10354 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
5e856224 10355@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
58c5fc13
MT
10356
10357 static inline void native_load_tr_desc(void)
10358 {
ae4e228f 10359+ pax_open_kernel();
58c5fc13 10360 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
ae4e228f 10361+ pax_close_kernel();
58c5fc13
MT
10362 }
10363
10364 static inline void native_load_gdt(const struct desc_ptr *dtr)
5e856224 10365@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
58c5fc13 10366 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15a11c5b 10367 unsigned int i;
58c5fc13 10368
ae4e228f 10369+ pax_open_kernel();
58c5fc13
MT
10370 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10371 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
ae4e228f 10372+ pax_close_kernel();
58c5fc13
MT
10373 }
10374
10375 #define _LDT_empty(info) \
5e856224 10376@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
df50ba0c
MT
10377 }
10378
5e856224
MT
10379 #ifdef CONFIG_X86_64
10380-static inline void set_nmi_gate(int gate, void *addr)
10381+static inline void set_nmi_gate(int gate, const void *addr)
10382 {
10383 gate_desc s;
10384
10385@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10386 }
10387 #endif
10388
df50ba0c
MT
10389-static inline void _set_gate(int gate, unsigned type, void *addr,
10390+static inline void _set_gate(int gate, unsigned type, const void *addr,
10391 unsigned dpl, unsigned ist, unsigned seg)
10392 {
10393 gate_desc s;
5e856224 10394@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
df50ba0c
MT
10395 * Pentium F0 0F bugfix can have resulted in the mapped
10396 * IDT being write-protected.
10397 */
10398-static inline void set_intr_gate(unsigned int n, void *addr)
10399+static inline void set_intr_gate(unsigned int n, const void *addr)
10400 {
10401 BUG_ON((unsigned)n > 0xFF);
10402 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
5e856224 10403@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
df50ba0c
MT
10404 /*
10405 * This routine sets up an interrupt gate at directory privilege level 3.
10406 */
10407-static inline void set_system_intr_gate(unsigned int n, void *addr)
10408+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10409 {
10410 BUG_ON((unsigned)n > 0xFF);
10411 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10412 }
10413
10414-static inline void set_system_trap_gate(unsigned int n, void *addr)
10415+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10416 {
10417 BUG_ON((unsigned)n > 0xFF);
10418 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10419 }
10420
10421-static inline void set_trap_gate(unsigned int n, void *addr)
10422+static inline void set_trap_gate(unsigned int n, const void *addr)
10423 {
10424 BUG_ON((unsigned)n > 0xFF);
10425 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
5e856224 10426@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
df50ba0c
MT
10427 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10428 {
10429 BUG_ON((unsigned)n > 0xFF);
10430- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10431+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10432 }
10433
10434-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10435+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10436 {
10437 BUG_ON((unsigned)n > 0xFF);
10438 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10439 }
10440
10441-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10442+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10443 {
10444 BUG_ON((unsigned)n > 0xFF);
58c5fc13
MT
10445 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10446 }
10447
10448+#ifdef CONFIG_X86_32
10449+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10450+{
10451+ struct desc_struct d;
10452+
10453+ if (likely(limit))
10454+ limit = (limit - 1UL) >> PAGE_SHIFT;
10455+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10456+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10457+}
10458+#endif
10459+
10460 #endif /* _ASM_X86_DESC_H */
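[Editor's note - illustrative sketch, not part of the patch.] In desc.h every write to the descriptor tables (IDT, LDT, GDT, TLS entries, LTR) is now bracketed by pax_open_kernel()/pax_close_kernel(), so the tables can stay in read-only memory except during the brief update window. A rough user-space analogue of that open/write/close discipline using mprotect() on a page-aligned table; this only illustrates the idea, since the kernel implementation toggles CR0.WP or page-table bits rather than calling mprotect:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static unsigned char table[4096] __attribute__((aligned(4096)));

static void table_set_writable(int writable)
{
        mprotect(table, sizeof(table), writable ? PROT_READ | PROT_WRITE : PROT_READ);
}

int main(void)
{
        table_set_writable(0);          /* normally read-only             */
        table_set_writable(1);          /* "pax_open_kernel()"            */
        memset(table, 0x90, 16);        /* update one descriptor's worth  */
        table_set_writable(0);          /* "pax_close_kernel()"           */
        printf("first byte: 0x%02x\n", table[0]);
        return 0;
}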
fe2de317
MT
10461diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10462index 278441f..b95a174 100644
10463--- a/arch/x86/include/asm/desc_defs.h
10464+++ b/arch/x86/include/asm/desc_defs.h
10465@@ -31,6 +31,12 @@ struct desc_struct {
10466 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10467 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10468 };
10469+ struct {
10470+ u16 offset_low;
10471+ u16 seg;
10472+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10473+ unsigned offset_high: 16;
10474+ } gate;
10475 };
10476 } __attribute__((packed));
10477
10478diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
5e856224 10479index 3778256..c5d4fce 100644
fe2de317
MT
10480--- a/arch/x86/include/asm/e820.h
10481+++ b/arch/x86/include/asm/e820.h
57199397 10482@@ -69,7 +69,7 @@ struct e820map {
ae4e228f 10483 #define ISA_START_ADDRESS 0xa0000
58c5fc13 10484 #define ISA_END_ADDRESS 0x100000
58c5fc13
MT
10485
10486-#define BIOS_BEGIN 0x000a0000
10487+#define BIOS_BEGIN 0x000c0000
10488 #define BIOS_END 0x00100000
10489
bc901d79 10490 #define BIOS_ROM_BASE 0xffe00000
fe2de317 10491diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
4c928ab7 10492index 5f962df..7289f09 100644
fe2de317
MT
10493--- a/arch/x86/include/asm/elf.h
10494+++ b/arch/x86/include/asm/elf.h
4c928ab7 10495@@ -238,7 +238,25 @@ extern int force_personality32;
58c5fc13
MT
10496 the loader. We need to make sure that it is out of the way of the program
10497 that it will "exec", and that there is sufficient room for the brk. */
10498
10499+#ifdef CONFIG_PAX_SEGMEXEC
10500+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10501+#else
10502 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10503+#endif
10504+
10505+#ifdef CONFIG_PAX_ASLR
10506+#ifdef CONFIG_X86_32
10507+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10508+
10509+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10510+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10511+#else
10512+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10513+
df50ba0c
MT
10514+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10515+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
58c5fc13
MT
10516+#endif
10517+#endif
10518
10519 /* This yields a mask that user programs can use to figure out what
10520 instruction set this CPU supports. This could be done in user space,
4c928ab7 10521@@ -291,9 +309,7 @@ do { \
15a11c5b 10522
58c5fc13
MT
10523 #define ARCH_DLINFO \
10524 do { \
15a11c5b 10525- if (vdso_enabled) \
58c5fc13
MT
10526- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10527- (unsigned long)current->mm->context.vdso); \
15a11c5b 10528+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
58c5fc13
MT
10529 } while (0)
10530
10531 #define AT_SYSINFO 32
4c928ab7 10532@@ -304,7 +320,7 @@ do { \
58c5fc13
MT
10533
10534 #endif /* !CONFIG_X86_32 */
10535
10536-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10537+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10538
10539 #define VDSO_ENTRY \
10540 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
4c928ab7 10541@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
58c5fc13
MT
10542 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10543 #define compat_arch_setup_additional_pages syscall32_setup_pages
10544
10545-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10546-#define arch_randomize_brk arch_randomize_brk
10547-
4c928ab7
MT
10548 /*
10549 * True on X86_32 or when emulating IA32 on X86_64
10550 */
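[Editor's note - illustrative sketch, not part of the patch.] The elf.h hunk defines the PaX ASLR parameters: a fixed PAX_ELF_ET_DYN_BASE plus PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN bit counts that say how many page-granular bits of randomisation each area receives (15 or 16 on i386 depending on SEGMEXEC, up to TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 on 64-bit). A quick back-of-the-envelope sketch of what such a bit count means, assuming 4 KiB pages; the numbers are an illustration, not code from the patch:

#include <stdio.h>

int main(void)
{
        unsigned int delta_bits = 16;   /* PAX_DELTA_MMAP_LEN on plain i386 */
        unsigned long long span = (1ULL << delta_bits) * 4096ULL;
        printf("%u bits of page randomisation span %llu MiB\n",
               delta_bits, span >> 20);        /* prints 256 MiB */
        return 0;
}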
fe2de317
MT
10551diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10552index cc70c1c..d96d011 100644
10553--- a/arch/x86/include/asm/emergency-restart.h
10554+++ b/arch/x86/include/asm/emergency-restart.h
66a7e928
MT
10555@@ -15,6 +15,6 @@ enum reboot_type {
10556
10557 extern enum reboot_type reboot_type;
10558
10559-extern void machine_emergency_restart(void);
10560+extern void machine_emergency_restart(void) __noreturn;
10561
10562 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
4c928ab7
MT
10563diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
10564index dbe82a5..c6d8a00 100644
10565--- a/arch/x86/include/asm/floppy.h
10566+++ b/arch/x86/include/asm/floppy.h
10567@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
10568 }
10569
10570
10571+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
10572 static unsigned long vdma_mem_alloc(unsigned long size)
10573 {
10574 return (unsigned long)vmalloc(size);
fe2de317
MT
10575diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10576index d09bb03..4ea4194 100644
10577--- a/arch/x86/include/asm/futex.h
10578+++ b/arch/x86/include/asm/futex.h
bc901d79 10579@@ -12,16 +12,18 @@
58c5fc13
MT
10580 #include <asm/system.h>
10581
df50ba0c 10582 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 10583+ typecheck(u32 __user *, uaddr); \
58c5fc13
MT
10584 asm volatile("1:\t" insn "\n" \
10585 "2:\t.section .fixup,\"ax\"\n" \
df50ba0c
MT
10586 "3:\tmov\t%3, %1\n" \
10587 "\tjmp\t2b\n" \
10588 "\t.previous\n" \
10589 _ASM_EXTABLE(1b, 3b) \
10590- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
6e9df6a3 10591+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
df50ba0c
MT
10592 : "i" (-EFAULT), "0" (oparg), "1" (0))
10593
10594 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 10595+ typecheck(u32 __user *, uaddr); \
df50ba0c
MT
10596 asm volatile("1:\tmovl %2, %0\n" \
10597 "\tmovl\t%0, %3\n" \
10598 "\t" insn "\n" \
66a7e928 10599@@ -34,7 +36,7 @@
df50ba0c
MT
10600 _ASM_EXTABLE(1b, 4b) \
10601 _ASM_EXTABLE(2b, 4b) \
58c5fc13 10602 : "=&a" (oldval), "=&r" (ret), \
df50ba0c 10603- "+m" (*uaddr), "=&r" (tem) \
6e9df6a3 10604+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
58c5fc13 10605 : "r" (oparg), "i" (-EFAULT), "1" (0))
58c5fc13 10606
66a7e928 10607 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
fe2de317 10608@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
58c5fc13
MT
10609
10610 switch (op) {
10611 case FUTEX_OP_SET:
bc901d79 10612- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16454cff 10613+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
58c5fc13
MT
10614 break;
10615 case FUTEX_OP_ADD:
bc901d79 10616- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16454cff 10617+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
58c5fc13 10618 uaddr, oparg);
58c5fc13
MT
10619 break;
10620 case FUTEX_OP_OR:
fe2de317 10621@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
66a7e928 10622 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
58c5fc13
MT
10623 return -EFAULT;
10624
66a7e928
MT
10625- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10626+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
bc901d79 10627 "2:\t.section .fixup, \"ax\"\n"
66a7e928 10628 "3:\tmov %3, %0\n"
58c5fc13
MT
10629 "\tjmp 2b\n"
10630 "\t.previous\n"
10631 _ASM_EXTABLE(1b, 3b)
66a7e928 10632- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
6e9df6a3 10633+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
66a7e928 10634 : "i" (-EFAULT), "r" (newval), "1" (oldval)
58c5fc13
MT
10635 : "memory"
10636 );
fe2de317 10637diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
4c928ab7 10638index eb92a6e..b98b2f4 100644
fe2de317
MT
10639--- a/arch/x86/include/asm/hw_irq.h
10640+++ b/arch/x86/include/asm/hw_irq.h
6e9df6a3 10641@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8308f9c9
MT
10642 extern void enable_IO_APIC(void);
10643
10644 /* Statistics */
10645-extern atomic_t irq_err_count;
10646-extern atomic_t irq_mis_count;
10647+extern atomic_unchecked_t irq_err_count;
10648+extern atomic_unchecked_t irq_mis_count;
10649
10650 /* EISA */
10651 extern void eisa_set_level_irq(unsigned int irq);
fe2de317 10652diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
5e856224 10653index 2479049..3fb9795 100644
fe2de317
MT
10654--- a/arch/x86/include/asm/i387.h
10655+++ b/arch/x86/include/asm/i387.h
5e856224 10656@@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
df50ba0c
MT
10657 {
10658 int err;
10659
10660+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10661+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
6e9df6a3 10662+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
df50ba0c
MT
10663+#endif
10664+
bc901d79 10665 /* See comment in fxsave() below. */
16454cff
MT
10666 #ifdef CONFIG_AS_FXSAVEQ
10667 asm volatile("1: fxrstorq %[fx]\n\t"
5e856224 10668@@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
df50ba0c
MT
10669 {
10670 int err;
10671
10672+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10673+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10674+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10675+#endif
10676+
6892158b
MT
10677 /*
10678 * Clear the bytes not touched by the fxsave and reserved
10679 * for the SW usage.
5e856224
MT
10680@@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10681 "emms\n\t" /* clear stack tags */
10682 "fildl %P[addr]", /* set F?P to defined value */
10683 X86_FEATURE_FXSAVE_LEAK,
10684- [addr] "m" (tsk->thread.fpu.has_fpu));
10685+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10686
10687 return fpu_restore_checking(&tsk->thread.fpu);
10688 }
10689@@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
4c928ab7
MT
10690 static inline bool interrupted_user_mode(void)
10691 {
10692 struct pt_regs *regs = get_irq_regs();
10693- return regs && user_mode_vm(regs);
10694+ return regs && user_mode(regs);
10695 }
58c5fc13
MT
10696
10697 /*
fe2de317
MT
10698diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10699index d8e8eef..99f81ae 100644
10700--- a/arch/x86/include/asm/io.h
10701+++ b/arch/x86/include/asm/io.h
6e9df6a3 10702@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
58c5fc13
MT
10703
10704 #include <linux/vmalloc.h>
10705
10706+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
ae4e228f 10707+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
58c5fc13 10708+{
c52201e0 10709+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
10710+}
10711+
ae4e228f 10712+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
58c5fc13 10713+{
c52201e0 10714+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
10715+}
10716+
df50ba0c
MT
10717 /*
10718 * Convert a virtual cached pointer to an uncached pointer
10719 */
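[Editor's note - illustrative sketch, not part of the patch.] The io.h hunk adds ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers that reject physical-memory accesses whose rounded-up end would exceed the CPU's physical address width (boot_cpu_data.x86_phys_bits). A standalone sketch of the same page-frame comparison; here the physical-bit width is passed as a parameter instead of being read from boot_cpu_data, and the sample values are made up:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int phys_range_ok(unsigned long long addr, unsigned long long count, unsigned int phys_bits)
{
        /* round the end of the range up to a page, then compare page frame numbers */
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
        printf("%d\n", phys_range_ok(0xf0000000ULL, 0x1000, 36));  /* 1: fits below 2^36   */
        printf("%d\n", phys_range_ok(0xfffff000ULL, 0x2000, 32));  /* 0: spills past 4 GiB */
        return 0;
}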
fe2de317
MT
10720diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10721index bba3cf8..06bc8da 100644
10722--- a/arch/x86/include/asm/irqflags.h
10723+++ b/arch/x86/include/asm/irqflags.h
10724@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
ae4e228f
MT
10725 sti; \
10726 sysexit
10727
df50ba0c
MT
10728+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10729+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10730+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10731+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
ae4e228f
MT
10732+
10733 #else
58c5fc13
MT
10734 #define INTERRUPT_RETURN iret
10735 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
fe2de317
MT
10736diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10737index 5478825..839e88c 100644
10738--- a/arch/x86/include/asm/kprobes.h
10739+++ b/arch/x86/include/asm/kprobes.h
71d190be
MT
10740@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10741 #define RELATIVEJUMP_SIZE 5
10742 #define RELATIVECALL_OPCODE 0xe8
10743 #define RELATIVE_ADDR_SIZE 4
10744-#define MAX_STACK_SIZE 64
10745-#define MIN_STACK_SIZE(ADDR) \
10746- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10747- THREAD_SIZE - (unsigned long)(ADDR))) \
10748- ? (MAX_STACK_SIZE) \
10749- : (((unsigned long)current_thread_info()) + \
10750- THREAD_SIZE - (unsigned long)(ADDR)))
10751+#define MAX_STACK_SIZE 64UL
10752+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10753
10754 #define flush_insn_slot(p) do { } while (0)
10755
fe2de317 10756diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
5e856224 10757index 52d6640..136b3bd 100644
fe2de317
MT
10758--- a/arch/x86/include/asm/kvm_host.h
10759+++ b/arch/x86/include/asm/kvm_host.h
5e856224 10760@@ -663,7 +663,7 @@ struct kvm_x86_ops {
4c928ab7
MT
10761 int (*check_intercept)(struct kvm_vcpu *vcpu,
10762 struct x86_instruction_info *info,
15a11c5b 10763 enum x86_intercept_stage stage);
15a11c5b
MT
10764-};
10765+} __do_const;
10766
10767 struct kvm_arch_async_pf {
10768 u32 token;
5e856224 10769@@ -694,7 +694,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
4c928ab7
MT
10770 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
10771
10772 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10773- const void *val, int bytes);
10774+ const void *val, int bytes) __size_overflow(2);
4c928ab7
MT
10775 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
10776
10777 extern bool tdp_enabled;
5e856224 10778@@ -781,7 +781,7 @@ int fx_init(struct kvm_vcpu *vcpu);
4c928ab7 10779
4c928ab7
MT
10780 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
10781 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5e856224
MT
10782- const u8 *new, int bytes);
10783+ const u8 *new, int bytes) __size_overflow(2);
10784 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
4c928ab7
MT
10785 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
10786 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
fe2de317
MT
10787diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10788index 9cdae5d..300d20f 100644
10789--- a/arch/x86/include/asm/local.h
10790+++ b/arch/x86/include/asm/local.h
bc901d79 10791@@ -18,26 +18,58 @@ typedef struct {
58c5fc13
MT
10792
10793 static inline void local_inc(local_t *l)
10794 {
10795- asm volatile(_ASM_INC "%0"
10796+ asm volatile(_ASM_INC "%0\n"
10797+
10798+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10799+ "jno 0f\n"
58c5fc13 10800+ _ASM_DEC "%0\n"
bc901d79
MT
10801+ "int $4\n0:\n"
10802+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10803+#endif
10804+
10805 : "+m" (l->a.counter));
10806 }
10807
10808 static inline void local_dec(local_t *l)
10809 {
10810- asm volatile(_ASM_DEC "%0"
10811+ asm volatile(_ASM_DEC "%0\n"
10812+
10813+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10814+ "jno 0f\n"
58c5fc13 10815+ _ASM_INC "%0\n"
bc901d79
MT
10816+ "int $4\n0:\n"
10817+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10818+#endif
10819+
10820 : "+m" (l->a.counter));
10821 }
10822
10823 static inline void local_add(long i, local_t *l)
10824 {
10825- asm volatile(_ASM_ADD "%1,%0"
10826+ asm volatile(_ASM_ADD "%1,%0\n"
10827+
10828+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10829+ "jno 0f\n"
58c5fc13 10830+ _ASM_SUB "%1,%0\n"
bc901d79
MT
10831+ "int $4\n0:\n"
10832+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10833+#endif
10834+
10835 : "+m" (l->a.counter)
10836 : "ir" (i));
10837 }
10838
10839 static inline void local_sub(long i, local_t *l)
10840 {
10841- asm volatile(_ASM_SUB "%1,%0"
10842+ asm volatile(_ASM_SUB "%1,%0\n"
10843+
10844+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10845+ "jno 0f\n"
58c5fc13 10846+ _ASM_ADD "%1,%0\n"
bc901d79
MT
10847+ "int $4\n0:\n"
10848+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10849+#endif
10850+
10851 : "+m" (l->a.counter)
10852 : "ir" (i));
10853 }
fe2de317 10854@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
58c5fc13
MT
10855 {
10856 unsigned char c;
10857
10858- asm volatile(_ASM_SUB "%2,%0; sete %1"
10859+ asm volatile(_ASM_SUB "%2,%0\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10862+ "jno 0f\n"
58c5fc13 10863+ _ASM_ADD "%2,%0\n"
bc901d79
MT
10864+ "int $4\n0:\n"
10865+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10866+#endif
10867+
10868+ "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : "ir" (i) : "memory");
10871 return c;
fe2de317 10872@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
58c5fc13
MT
10873 {
10874 unsigned char c;
10875
10876- asm volatile(_ASM_DEC "%0; sete %1"
10877+ asm volatile(_ASM_DEC "%0\n"
10878+
10879+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10880+ "jno 0f\n"
58c5fc13 10881+ _ASM_INC "%0\n"
bc901d79
MT
10882+ "int $4\n0:\n"
10883+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10884+#endif
10885+
10886+ "sete %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : : "memory");
10889 return c != 0;
fe2de317 10890@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
58c5fc13
MT
10891 {
10892 unsigned char c;
10893
10894- asm volatile(_ASM_INC "%0; sete %1"
10895+ asm volatile(_ASM_INC "%0\n"
10896+
10897+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10898+ "jno 0f\n"
58c5fc13 10899+ _ASM_DEC "%0\n"
bc901d79
MT
10900+ "int $4\n0:\n"
10901+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10902+#endif
10903+
10904+ "sete %1\n"
10905 : "+m" (l->a.counter), "=qm" (c)
10906 : : "memory");
10907 return c != 0;
fe2de317 10908@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
58c5fc13
MT
10909 {
10910 unsigned char c;
10911
10912- asm volatile(_ASM_ADD "%2,%0; sets %1"
10913+ asm volatile(_ASM_ADD "%2,%0\n"
10914+
10915+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10916+ "jno 0f\n"
58c5fc13 10917+ _ASM_SUB "%2,%0\n"
bc901d79
MT
10918+ "int $4\n0:\n"
10919+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10920+#endif
10921+
10922+ "sets %1\n"
10923 : "+m" (l->a.counter), "=qm" (c)
10924 : "ir" (i) : "memory");
10925 return c;
fe2de317 10926@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
58c5fc13
MT
10927 #endif
10928 /* Modern 486+ processor */
10929 __i = i;
10930- asm volatile(_ASM_XADD "%0, %1;"
10931+ asm volatile(_ASM_XADD "%0, %1\n"
10932+
10933+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10934+ "jno 0f\n"
58c5fc13 10935+ _ASM_MOV "%0,%1\n"
bc901d79
MT
10936+ "int $4\n0:\n"
10937+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10938+#endif
10939+
10940 : "+r" (i), "+m" (l->a.counter)
10941 : : "memory");
10942 return i + __i;
fe2de317
MT
10943diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10944index 593e51d..fa69c9a 100644
10945--- a/arch/x86/include/asm/mman.h
10946+++ b/arch/x86/include/asm/mman.h
ae4e228f
MT
10947@@ -5,4 +5,14 @@
10948
10949 #include <asm-generic/mman.h>
58c5fc13
MT
10950
10951+#ifdef __KERNEL__
10952+#ifndef __ASSEMBLY__
10953+#ifdef CONFIG_X86_32
10954+#define arch_mmap_check i386_mmap_check
10955+int i386_mmap_check(unsigned long addr, unsigned long len,
10956+ unsigned long flags);
10957+#endif
10958+#endif
10959+#endif
10960+
10961 #endif /* _ASM_X86_MMAN_H */
fe2de317
MT
10962diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10963index 5f55e69..e20bfb1 100644
10964--- a/arch/x86/include/asm/mmu.h
10965+++ b/arch/x86/include/asm/mmu.h
10966@@ -9,7 +9,7 @@
10967 * we put the segment information here.
10968 */
10969 typedef struct {
10970- void *ldt;
10971+ struct desc_struct *ldt;
10972 int size;
10973
10974 #ifdef CONFIG_X86_64
10975@@ -18,7 +18,19 @@ typedef struct {
10976 #endif
10977
10978 struct mutex lock;
10979- void *vdso;
10980+ unsigned long vdso;
10981+
10982+#ifdef CONFIG_X86_32
10983+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10984+ unsigned long user_cs_base;
10985+ unsigned long user_cs_limit;
10986+
10987+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10988+ cpumask_t cpu_user_cs_mask;
10989+#endif
10990+
10991+#endif
10992+#endif
10993 } mm_context_t;
10994
10995 #ifdef CONFIG_SMP
10996diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10997index 6902152..399f3a2 100644
10998--- a/arch/x86/include/asm/mmu_context.h
10999+++ b/arch/x86/include/asm/mmu_context.h
11000@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
df50ba0c
MT
11001
11002 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11003 {
11004+
11005+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11006+ unsigned int i;
11007+ pgd_t *pgd;
11008+
11009+ pax_open_kernel();
11010+ pgd = get_cpu_pgd(smp_processor_id());
11011+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15a11c5b 11012+ set_pgd_batched(pgd+i, native_make_pgd(0));
df50ba0c
MT
11013+ pax_close_kernel();
11014+#endif
11015+
11016 #ifdef CONFIG_SMP
11017 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11018 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
fe2de317 11019@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
11020 struct task_struct *tsk)
11021 {
11022 unsigned cpu = smp_processor_id();
15a11c5b 11023+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
11024+ int tlbstate = TLBSTATE_OK;
11025+#endif
11026
11027 if (likely(prev != next)) {
58c5fc13 11028 #ifdef CONFIG_SMP
15a11c5b 11029+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
11030+ tlbstate = percpu_read(cpu_tlbstate.state);
11031+#endif
11032 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11033 percpu_write(cpu_tlbstate.active_mm, next);
11034 #endif
df50ba0c
MT
11035 cpumask_set_cpu(cpu, mm_cpumask(next));
11036
11037 /* Re-load page tables */
11038+#ifdef CONFIG_PAX_PER_CPU_PGD
11039+ pax_open_kernel();
11040+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11041+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11042+ pax_close_kernel();
11043+ load_cr3(get_cpu_pgd(cpu));
11044+#else
11045 load_cr3(next->pgd);
11046+#endif
ea610fa8 11047
c52201e0
MT
11048 /* stop flush ipis for the previous mm */
11049 cpumask_clear_cpu(cpu, mm_cpumask(prev));
fe2de317 11050@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
11051 */
11052 if (unlikely(prev->context.ldt != next->context.ldt))
11053 load_LDT_nolock(&next->context);
df50ba0c 11054- }
58c5fc13
MT
11055+
11056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
ae4e228f 11057+ if (!(__supported_pte_mask & _PAGE_NX)) {
58c5fc13
MT
11058+ smp_mb__before_clear_bit();
11059+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11060+ smp_mb__after_clear_bit();
11061+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11062+ }
11063+#endif
11064+
11065+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11066+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
ae4e228f
MT
11067+ prev->context.user_cs_limit != next->context.user_cs_limit))
11068+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
df50ba0c 11069 #ifdef CONFIG_SMP
ae4e228f 11070+ else if (unlikely(tlbstate != TLBSTATE_OK))
58c5fc13
MT
11071+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11072+#endif
ae4e228f 11073+#endif
58c5fc13 11074+
df50ba0c 11075+ }
58c5fc13 11076 else {
df50ba0c
MT
11077+
11078+#ifdef CONFIG_PAX_PER_CPU_PGD
11079+ pax_open_kernel();
11080+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11081+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11082+ pax_close_kernel();
11083+ load_cr3(get_cpu_pgd(cpu));
11084+#endif
11085+
11086+#ifdef CONFIG_SMP
11087 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11088 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11089
fe2de317 11090@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
df50ba0c
MT
11091 * tlb flush IPI delivery. We must reload CR3
11092 * to make sure to use no freed page tables.
58c5fc13 11093 */
df50ba0c
MT
11094+
11095+#ifndef CONFIG_PAX_PER_CPU_PGD
58c5fc13 11096 load_cr3(next->pgd);
df50ba0c
MT
11097+#endif
11098+
58c5fc13
MT
11099 load_LDT_nolock(&next->context);
11100+
11101+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
ae4e228f 11102+ if (!(__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
11103+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11104+#endif
11105+
11106+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11107+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 11108+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
58c5fc13
MT
11109+#endif
11110+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11111+#endif
11112+
11113 }
fe2de317
MT
11114+#endif
11115 }
11116-#endif
df50ba0c
MT
11117 }
11118
11119 #define activate_mm(prev, next) \
fe2de317
MT
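The switch_mm() hunk above is the core of PAX_PER_CPU_PGD: instead of pointing CR3 at the incoming mm's pgd directly, each CPU keeps a private PGD whose user half is re-copied from next->pgd on every context switch before CR3 is loaded with get_cpu_pgd(cpu). A minimal sketch of that copy step follows (illustrative only; it uses a hypothetical stand-in pgd_t so it compiles outside the kernel — the real loop is the clone_pgd_range() rewrite further down in this patch):

/* Hypothetical stand-in for the kernel's pgd_t, for illustration only. */
typedef struct { unsigned long pgd; } pgd_t;

/* Copy the user-space part of the incoming PGD into this CPU's private PGD,
 * entry by entry, mirroring what clone_pgd_range() does under pax_open_kernel(). */
static void example_clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
{
	while (count--)
		*dst++ = *src++;
}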
11120diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11121index 9eae775..c914fea 100644
11122--- a/arch/x86/include/asm/module.h
11123+++ b/arch/x86/include/asm/module.h
71d190be
MT
11124@@ -5,6 +5,7 @@
11125
11126 #ifdef CONFIG_X86_64
11127 /* X86_64 does not define MODULE_PROC_FAMILY */
11128+#define MODULE_PROC_FAMILY ""
11129 #elif defined CONFIG_M386
11130 #define MODULE_PROC_FAMILY "386 "
11131 #elif defined CONFIG_M486
fe2de317 11132@@ -59,8 +60,20 @@
df50ba0c
MT
11133 #error unknown processor family
11134 #endif
11135
71d190be
MT
11136-#ifdef CONFIG_X86_32
11137-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
fe2de317
MT
11138+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11139+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11140+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11141+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
df50ba0c 11142+#else
71d190be 11143+#define MODULE_PAX_KERNEXEC ""
58c5fc13
MT
11144 #endif
11145
6e9df6a3
MT
11146+#ifdef CONFIG_PAX_MEMORY_UDEREF
11147+#define MODULE_PAX_UDEREF "UDEREF "
71d190be 11148+#else
6e9df6a3 11149+#define MODULE_PAX_UDEREF ""
71d190be
MT
11150+#endif
11151+
6e9df6a3 11152+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
71d190be 11153+
58c5fc13 11154 #endif /* _ASM_X86_MODULE_H */
fe2de317
MT
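The module.h hunk above folds the selected PaX options into the module vermagic string, so a module built with mismatched KERNEXEC/UDEREF settings is refused at load time. As an illustrative expansion (not part of the patch; the exact string depends on the configuration), on x86_64 with CONFIG_PAX_MEMORY_UDEREF=y and no KERNEXEC plugin method selected:

/* Illustrative expansion only. */
/* MODULE_PROC_FAMILY   == ""          (X86_64)                        */
/* MODULE_PAX_KERNEXEC  == ""          (no plugin method selected)     */
/* MODULE_PAX_UDEREF    == "UDEREF "   (CONFIG_PAX_MEMORY_UDEREF=y)    */
/* MODULE_ARCH_VERMAGIC == "" "" "UDEREF "  ->  "UDEREF "              */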
11155diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11156index 7639dbf..e08a58c 100644
11157--- a/arch/x86/include/asm/page_64_types.h
11158+++ b/arch/x86/include/asm/page_64_types.h
bc901d79
MT
11159@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11160
11161 /* duplicated to the one in bootmem.h */
11162 extern unsigned long max_pfn;
11163-extern unsigned long phys_base;
11164+extern const unsigned long phys_base;
11165
11166 extern unsigned long __phys_addr(unsigned long);
11167 #define __phys_reloc_hide(x) (x)
fe2de317
MT
11168diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11169index a7d2db9..edb023e 100644
11170--- a/arch/x86/include/asm/paravirt.h
11171+++ b/arch/x86/include/asm/paravirt.h
11172@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15a11c5b
MT
11173 val);
11174 }
11175
11176+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11177+{
11178+ pgdval_t val = native_pgd_val(pgd);
11179+
11180+ if (sizeof(pgdval_t) > sizeof(long))
11181+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11182+ val, (u64)val >> 32);
11183+ else
11184+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11185+ val);
11186+}
11187+
11188 static inline void pgd_clear(pgd_t *pgdp)
11189 {
11190 set_pgd(pgdp, __pgd(0));
fe2de317 11191@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
ae4e228f
MT
11192 pv_mmu_ops.set_fixmap(idx, phys, flags);
11193 }
11194
11195+#ifdef CONFIG_PAX_KERNEXEC
11196+static inline unsigned long pax_open_kernel(void)
11197+{
efbe55a5 11198+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
ae4e228f
MT
11199+}
11200+
11201+static inline unsigned long pax_close_kernel(void)
11202+{
efbe55a5 11203+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
ae4e228f
MT
11204+}
11205+#else
11206+static inline unsigned long pax_open_kernel(void) { return 0; }
11207+static inline unsigned long pax_close_kernel(void) { return 0; }
11208+#endif
11209+
11210 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11211
11212 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
6e9df6a3 11213@@ -964,7 +991,7 @@ extern void default_banner(void);
58c5fc13
MT
11214
11215 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11216 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11217-#define PARA_INDIRECT(addr) *%cs:addr
11218+#define PARA_INDIRECT(addr) *%ss:addr
11219 #endif
11220
11221 #define INTERRUPT_RETURN \
6e9df6a3 11222@@ -1041,6 +1068,21 @@ extern void default_banner(void);
df50ba0c 11223 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
ae4e228f
MT
11224 CLBR_NONE, \
11225 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
58c5fc13 11226+
df50ba0c 11227+#define GET_CR0_INTO_RDI \
ae4e228f 11228+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
df50ba0c 11229+ mov %rax,%rdi
ae4e228f 11230+
df50ba0c
MT
11231+#define SET_RDI_INTO_CR0 \
11232+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
ae4e228f 11233+
df50ba0c
MT
11234+#define GET_CR3_INTO_RDI \
11235+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11236+ mov %rax,%rdi
11237+
11238+#define SET_RDI_INTO_CR3 \
11239+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
ae4e228f
MT
11240+
11241 #endif /* CONFIG_X86_32 */
11242
11243 #endif /* __ASSEMBLY__ */
fe2de317
MT
11244diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11245index 8e8b9a4..f07d725 100644
11246--- a/arch/x86/include/asm/paravirt_types.h
11247+++ b/arch/x86/include/asm/paravirt_types.h
6e9df6a3 11248@@ -84,20 +84,20 @@ struct pv_init_ops {
15a11c5b
MT
11249 */
11250 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11251 unsigned long addr, unsigned len);
11252-};
11253+} __no_const;
11254
11255
11256 struct pv_lazy_ops {
11257 /* Set deferred update mode, used for batching operations. */
11258 void (*enter)(void);
11259 void (*leave)(void);
11260-};
11261+} __no_const;
11262
11263 struct pv_time_ops {
11264 unsigned long long (*sched_clock)(void);
6e9df6a3 11265 unsigned long long (*steal_clock)(int cpu);
15a11c5b
MT
11266 unsigned long (*get_tsc_khz)(void);
11267-};
11268+} __no_const;
11269
11270 struct pv_cpu_ops {
11271 /* hooks for various privileged instructions */
6e9df6a3 11272@@ -193,7 +193,7 @@ struct pv_cpu_ops {
15a11c5b
MT
11273
11274 void (*start_context_switch)(struct task_struct *prev);
11275 void (*end_context_switch)(struct task_struct *next);
11276-};
11277+} __no_const;
11278
11279 struct pv_irq_ops {
11280 /*
6e9df6a3 11281@@ -224,7 +224,7 @@ struct pv_apic_ops {
15a11c5b
MT
11282 unsigned long start_eip,
11283 unsigned long start_esp);
11284 #endif
11285-};
11286+} __no_const;
11287
11288 struct pv_mmu_ops {
11289 unsigned long (*read_cr2)(void);
6e9df6a3 11290@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15a11c5b
MT
11291 struct paravirt_callee_save make_pud;
11292
11293 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11294+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11295 #endif /* PAGETABLE_LEVELS == 4 */
11296 #endif /* PAGETABLE_LEVELS >= 3 */
11297
6e9df6a3 11298@@ -324,6 +325,12 @@ struct pv_mmu_ops {
ae4e228f
MT
11299 an mfn. We can tell which is which from the index. */
11300 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11301 phys_addr_t phys, pgprot_t flags);
11302+
11303+#ifdef CONFIG_PAX_KERNEXEC
11304+ unsigned long (*pax_open_kernel)(void);
11305+ unsigned long (*pax_close_kernel)(void);
11306+#endif
11307+
11308 };
11309
11310 struct arch_spinlock;
6e9df6a3 11311@@ -334,7 +341,7 @@ struct pv_lock_ops {
15a11c5b
MT
11312 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11313 int (*spin_trylock)(struct arch_spinlock *lock);
11314 void (*spin_unlock)(struct arch_spinlock *lock);
11315-};
11316+} __no_const;
11317
11318 /* This contains all the paravirt structures: we get a convenient
11319 * number for each function using the offset which we use to indicate
fe2de317 11320diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
5e856224 11321index b4389a4..7024269 100644
fe2de317
MT
11322--- a/arch/x86/include/asm/pgalloc.h
11323+++ b/arch/x86/include/asm/pgalloc.h
11324@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
58c5fc13
MT
11325 pmd_t *pmd, pte_t *pte)
11326 {
11327 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11328+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11329+}
11330+
11331+static inline void pmd_populate_user(struct mm_struct *mm,
11332+ pmd_t *pmd, pte_t *pte)
11333+{
11334+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11335 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11336 }
11337
5e856224
MT
11338@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11339
11340 #ifdef CONFIG_X86_PAE
11341 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11342+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11343+{
11344+ pud_populate(mm, pudp, pmd);
11345+}
11346 #else /* !CONFIG_X86_PAE */
11347 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11348 {
11349 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11350 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11351 }
11352+
11353+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11354+{
11355+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11356+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11357+}
11358 #endif /* CONFIG_X86_PAE */
11359
11360 #if PAGETABLE_LEVELS > 3
11361@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11362 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11363 }
11364
11365+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11366+{
11367+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11368+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11369+}
11370+
11371 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11372 {
11373 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
fe2de317
MT
11374diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11375index 98391db..8f6984e 100644
11376--- a/arch/x86/include/asm/pgtable-2level.h
11377+++ b/arch/x86/include/asm/pgtable-2level.h
11378@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
58c5fc13
MT
11379
11380 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11381 {
ae4e228f 11382+ pax_open_kernel();
58c5fc13 11383 *pmdp = pmd;
ae4e228f 11384+ pax_close_kernel();
58c5fc13
MT
11385 }
11386
11387 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
fe2de317
MT
11388diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11389index effff47..f9e4035 100644
11390--- a/arch/x86/include/asm/pgtable-3level.h
11391+++ b/arch/x86/include/asm/pgtable-3level.h
11392@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
58c5fc13
MT
11393
11394 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11395 {
ae4e228f 11396+ pax_open_kernel();
58c5fc13 11397 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
ae4e228f 11398+ pax_close_kernel();
58c5fc13
MT
11399 }
11400
11401 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11402 {
ae4e228f 11403+ pax_open_kernel();
58c5fc13 11404 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
ae4e228f 11405+ pax_close_kernel();
58c5fc13
MT
11406 }
11407
11408 /*
fe2de317 11409diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
5e856224 11410index 49afb3f..ed14d07 100644
fe2de317
MT
11411--- a/arch/x86/include/asm/pgtable.h
11412+++ b/arch/x86/include/asm/pgtable.h
11413@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15a11c5b
MT
11414
11415 #ifndef __PAGETABLE_PUD_FOLDED
11416 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11417+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11418 #define pgd_clear(pgd) native_pgd_clear(pgd)
11419 #endif
11420
fe2de317 11421@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
ae4e228f
MT
11422
11423 #define arch_end_context_switch(prev) do {} while(0)
11424
11425+#define pax_open_kernel() native_pax_open_kernel()
11426+#define pax_close_kernel() native_pax_close_kernel()
11427 #endif /* CONFIG_PARAVIRT */
11428
11429+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11430+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
58c5fc13
MT
11431+
11432+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f
MT
11433+static inline unsigned long native_pax_open_kernel(void)
11434+{
58c5fc13
MT
11435+ unsigned long cr0;
11436+
ae4e228f
MT
11437+ preempt_disable();
11438+ barrier();
11439+ cr0 = read_cr0() ^ X86_CR0_WP;
11440+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11441+ write_cr0(cr0);
11442+ return cr0 ^ X86_CR0_WP;
11443+}
58c5fc13 11444+
ae4e228f
MT
11445+static inline unsigned long native_pax_close_kernel(void)
11446+{
11447+ unsigned long cr0;
58c5fc13 11448+
ae4e228f
MT
11449+ cr0 = read_cr0() ^ X86_CR0_WP;
11450+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11451+ write_cr0(cr0);
11452+ barrier();
11453+ preempt_enable_no_resched();
11454+ return cr0 ^ X86_CR0_WP;
11455+}
11456+#else
11457+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11458+static inline unsigned long native_pax_close_kernel(void) { return 0; }
58c5fc13
MT
11459+#endif
11460+
ae4e228f 11461 /*
58c5fc13
MT
11462 * The following only work if pte_present() is true.
11463 * Undefined behaviour if not..
11464 */
11465+static inline int pte_user(pte_t pte)
11466+{
11467+ return pte_val(pte) & _PAGE_USER;
11468+}
11469+
11470 static inline int pte_dirty(pte_t pte)
11471 {
11472 return pte_flags(pte) & _PAGE_DIRTY;
fe2de317 11473@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
58c5fc13
MT
11474 return pte_clear_flags(pte, _PAGE_RW);
11475 }
11476
11477+static inline pte_t pte_mkread(pte_t pte)
11478+{
11479+ return __pte(pte_val(pte) | _PAGE_USER);
11480+}
11481+
11482 static inline pte_t pte_mkexec(pte_t pte)
11483 {
11484- return pte_clear_flags(pte, _PAGE_NX);
11485+#ifdef CONFIG_X86_PAE
11486+ if (__supported_pte_mask & _PAGE_NX)
11487+ return pte_clear_flags(pte, _PAGE_NX);
11488+ else
11489+#endif
11490+ return pte_set_flags(pte, _PAGE_USER);
11491+}
11492+
11493+static inline pte_t pte_exprotect(pte_t pte)
11494+{
11495+#ifdef CONFIG_X86_PAE
11496+ if (__supported_pte_mask & _PAGE_NX)
11497+ return pte_set_flags(pte, _PAGE_NX);
11498+ else
11499+#endif
11500+ return pte_clear_flags(pte, _PAGE_USER);
11501 }
11502
11503 static inline pte_t pte_mkdirty(pte_t pte)
fe2de317 11504@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
df50ba0c
MT
11505 #endif
11506
11507 #ifndef __ASSEMBLY__
11508+
11509+#ifdef CONFIG_PAX_PER_CPU_PGD
11510+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11511+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11512+{
11513+ return cpu_pgd[cpu];
11514+}
11515+#endif
11516+
11517 #include <linux/mm_types.h>
11518
11519 static inline int pte_none(pte_t pte)
fe2de317 11520@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
58c5fc13
MT
11521
11522 static inline int pgd_bad(pgd_t pgd)
11523 {
11524- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11525+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11526 }
11527
11528 static inline int pgd_none(pgd_t pgd)
15a11c5b 11529@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
11530 * pgd_offset() returns a (pgd_t *)
11531 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11532 */
11533-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11534+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11535+
11536+#ifdef CONFIG_PAX_PER_CPU_PGD
11537+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11538+#endif
11539+
11540 /*
11541 * a shortcut which implies the use of the kernel's pgd, instead
11542 * of a process's
15a11c5b 11543@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
11544 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11545 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11546
11547+#ifdef CONFIG_X86_32
11548+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11549+#else
11550+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11551+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11552+
11553+#ifdef CONFIG_PAX_MEMORY_UDEREF
11554+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11555+#else
11556+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11557+#endif
11558+
11559+#endif
11560+
11561 #ifndef __ASSEMBLY__
11562
11563 extern int direct_gbpages;
fe2de317 11564@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
ae4e228f
MT
11565 * dst and src can be on the same page, but the range must not overlap,
11566 * and must not cross a page boundary.
58c5fc13 11567 */
ae4e228f
MT
11568-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11569+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
58c5fc13
MT
11570 {
11571- memcpy(dst, src, count * sizeof(pgd_t));
ae4e228f
MT
11572+ pax_open_kernel();
11573+ while (count--)
11574+ *dst++ = *src++;
11575+ pax_close_kernel();
58c5fc13
MT
11576 }
11577
df50ba0c
MT
11578+#ifdef CONFIG_PAX_PER_CPU_PGD
11579+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11580+#endif
11581+
11582+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11583+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11584+#else
11585+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11586+#endif
58c5fc13 11587
df50ba0c
MT
11588 #include <asm-generic/pgtable.h>
11589 #endif /* __ASSEMBLY__ */
fe2de317
MT
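The pgtable.h hunk above supplies native_pax_open_kernel()/native_pax_close_kernel(), which briefly clear CR0.WP (with preemption disabled) so the kernel can store into otherwise read-only page-table pages; the set_pmd/set_pud wrappers earlier in this patch bracket their writes with exactly this pair. A schematic of the pattern, assuming the helpers defined in the hunk (a kernel-context sketch, not a standalone program):

static inline void example_set_entry(unsigned long *entry, unsigned long val)
{
	pax_open_kernel();	/* returns with CR0.WP clear and preemption off */
	*entry = val;		/* store into a page that is normally write-protected */
	pax_close_kernel();	/* restores CR0.WP and re-enables preemption */
}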
11590diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11591index 0c92113..34a77c6 100644
11592--- a/arch/x86/include/asm/pgtable_32.h
11593+++ b/arch/x86/include/asm/pgtable_32.h
11594@@ -25,9 +25,6 @@
11595 struct mm_struct;
11596 struct vm_area_struct;
11597
11598-extern pgd_t swapper_pg_dir[1024];
11599-extern pgd_t initial_page_table[1024];
11600-
11601 static inline void pgtable_cache_init(void) { }
11602 static inline void check_pgt_cache(void) { }
11603 void paging_init(void);
11604@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11605 # include <asm/pgtable-2level.h>
11606 #endif
11607
11608+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11609+extern pgd_t initial_page_table[PTRS_PER_PGD];
11610+#ifdef CONFIG_X86_PAE
11611+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11612+#endif
11613+
11614 #if defined(CONFIG_HIGHPTE)
11615 #define pte_offset_map(dir, address) \
11616 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11617@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11618 /* Clear a kernel PTE and flush it from the TLB */
11619 #define kpte_clear_flush(ptep, vaddr) \
11620 do { \
11621+ pax_open_kernel(); \
11622 pte_clear(&init_mm, (vaddr), (ptep)); \
11623+ pax_close_kernel(); \
11624 __flush_tlb_one((vaddr)); \
11625 } while (0)
11626
11627@@ -74,6 +79,9 @@ do { \
11628
11629 #endif /* !__ASSEMBLY__ */
11630
11631+#define HAVE_ARCH_UNMAPPED_AREA
11632+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11633+
11634 /*
11635 * kern_addr_valid() is (1) for FLATMEM and (0) for
11636 * SPARSEMEM and DISCONTIGMEM
11637diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11638index ed5903b..c7fe163 100644
11639--- a/arch/x86/include/asm/pgtable_32_types.h
11640+++ b/arch/x86/include/asm/pgtable_32_types.h
11641@@ -8,7 +8,7 @@
11642 */
11643 #ifdef CONFIG_X86_PAE
11644 # include <asm/pgtable-3level_types.h>
11645-# define PMD_SIZE (1UL << PMD_SHIFT)
11646+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11647 # define PMD_MASK (~(PMD_SIZE - 1))
11648 #else
11649 # include <asm/pgtable-2level_types.h>
11650@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11651 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11652 #endif
11653
11654+#ifdef CONFIG_PAX_KERNEXEC
11655+#ifndef __ASSEMBLY__
11656+extern unsigned char MODULES_EXEC_VADDR[];
11657+extern unsigned char MODULES_EXEC_END[];
11658+#endif
11659+#include <asm/boot.h>
11660+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11661+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11662+#else
11663+#define ktla_ktva(addr) (addr)
11664+#define ktva_ktla(addr) (addr)
11665+#endif
11666+
11667 #define MODULES_VADDR VMALLOC_START
11668 #define MODULES_END VMALLOC_END
11669 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11670diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
5e856224 11671index 975f709..9f779c9 100644
fe2de317
MT
11672--- a/arch/x86/include/asm/pgtable_64.h
11673+++ b/arch/x86/include/asm/pgtable_64.h
11674@@ -16,10 +16,14 @@
11675
11676 extern pud_t level3_kernel_pgt[512];
11677 extern pud_t level3_ident_pgt[512];
11678+extern pud_t level3_vmalloc_start_pgt[512];
11679+extern pud_t level3_vmalloc_end_pgt[512];
11680+extern pud_t level3_vmemmap_pgt[512];
11681+extern pud_t level2_vmemmap_pgt[512];
11682 extern pmd_t level2_kernel_pgt[512];
11683 extern pmd_t level2_fixmap_pgt[512];
11684-extern pmd_t level2_ident_pgt[512];
11685-extern pgd_t init_level4_pgt[];
11686+extern pmd_t level2_ident_pgt[512*2];
11687+extern pgd_t init_level4_pgt[512];
11688
11689 #define swapper_pg_dir init_level4_pgt
11690
11691@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11692
11693 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11694 {
11695+ pax_open_kernel();
11696 *pmdp = pmd;
11697+ pax_close_kernel();
11698 }
11699
11700 static inline void native_pmd_clear(pmd_t *pmd)
5e856224
MT
11701@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11702
11703 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11704 {
11705+ pax_open_kernel();
11706 *pudp = pud;
11707+ pax_close_kernel();
11708 }
11709
11710 static inline void native_pud_clear(pud_t *pud)
11711@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
fe2de317
MT
11712
11713 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11714 {
11715+ pax_open_kernel();
11716+ *pgdp = pgd;
11717+ pax_close_kernel();
11718+}
11719+
11720+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11721+{
11722 *pgdp = pgd;
11723 }
11724
11725diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11726index 766ea16..5b96cb3 100644
11727--- a/arch/x86/include/asm/pgtable_64_types.h
11728+++ b/arch/x86/include/asm/pgtable_64_types.h
11729@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11730 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11731 #define MODULES_END _AC(0xffffffffff000000, UL)
11732 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11733+#define MODULES_EXEC_VADDR MODULES_VADDR
11734+#define MODULES_EXEC_END MODULES_END
11735+
11736+#define ktla_ktva(addr) (addr)
11737+#define ktva_ktla(addr) (addr)
11738
11739 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11740diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11741index 013286a..8b42f4f 100644
11742--- a/arch/x86/include/asm/pgtable_types.h
11743+++ b/arch/x86/include/asm/pgtable_types.h
16454cff 11744@@ -16,13 +16,12 @@
58c5fc13
MT
11745 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11746 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11747 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11748-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11749+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11750 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11751 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11752 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11753-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11754-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16454cff 11755-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
58c5fc13 11756+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16454cff 11757+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
58c5fc13
MT
11758 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11759
11760 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16454cff 11761@@ -40,7 +39,6 @@
58c5fc13
MT
11762 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11763 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11764 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11765-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11766 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11767 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11768 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16454cff 11769@@ -57,8 +55,10 @@
58c5fc13
MT
11770
11771 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11772 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11773-#else
11774+#elif defined(CONFIG_KMEMCHECK)
11775 #define _PAGE_NX (_AT(pteval_t, 0))
11776+#else
11777+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11778 #endif
11779
11780 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16454cff 11781@@ -96,6 +96,9 @@
58c5fc13
MT
11782 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11783 _PAGE_ACCESSED)
11784
11785+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11786+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11787+
11788 #define __PAGE_KERNEL_EXEC \
11789 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11790 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
6e9df6a3 11791@@ -106,7 +109,7 @@
58c5fc13
MT
11792 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11793 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11794 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11795-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
58c5fc13 11796+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
6e9df6a3
MT
11797 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11798 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
58c5fc13 11799 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
6e9df6a3 11800@@ -168,8 +171,8 @@
58c5fc13
MT
11801 * bits are combined, this will alow user to access the high address mapped
11802 * VDSO in the presence of CONFIG_COMPAT_VDSO
11803 */
11804-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11805-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11806+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11807+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11808 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11809 #endif
11810
fe2de317 11811@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
57199397
MT
11812 {
11813 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11814 }
11815+#endif
11816
11817+#if PAGETABLE_LEVELS == 3
11818+#include <asm-generic/pgtable-nopud.h>
11819+#endif
11820+
11821+#if PAGETABLE_LEVELS == 2
11822+#include <asm-generic/pgtable-nopmd.h>
11823+#endif
11824+
11825+#ifndef __ASSEMBLY__
11826 #if PAGETABLE_LEVELS > 3
11827 typedef struct { pudval_t pud; } pud_t;
11828
fe2de317 11829@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
57199397
MT
11830 return pud.pud;
11831 }
11832 #else
11833-#include <asm-generic/pgtable-nopud.h>
11834-
11835 static inline pudval_t native_pud_val(pud_t pud)
11836 {
11837 return native_pgd_val(pud.pgd);
fe2de317 11838@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
57199397
MT
11839 return pmd.pmd;
11840 }
11841 #else
11842-#include <asm-generic/pgtable-nopmd.h>
11843-
11844 static inline pmdval_t native_pmd_val(pmd_t pmd)
11845 {
11846 return native_pgd_val(pmd.pud.pgd);
6e9df6a3 11847@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
58c5fc13
MT
11848
11849 extern pteval_t __supported_pte_mask;
ae4e228f
MT
11850 extern void set_nx(void);
11851-extern int nx_enabled;
58c5fc13
MT
11852
11853 #define pgprot_writecombine pgprot_writecombine
11854 extern pgprot_t pgprot_writecombine(pgprot_t prot);
fe2de317 11855diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
5e856224 11856index 58545c9..fe6fc38e 100644
fe2de317
MT
11857--- a/arch/x86/include/asm/processor.h
11858+++ b/arch/x86/include/asm/processor.h
5e856224 11859@@ -266,7 +266,7 @@ struct tss_struct {
58c5fc13
MT
11860
11861 } ____cacheline_aligned;
11862
11863-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11864+extern struct tss_struct init_tss[NR_CPUS];
11865
11866 /*
11867 * Save the original ist values for checking stack pointers during debugging
5e856224 11868@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11869 */
11870 #define TASK_SIZE PAGE_OFFSET
11871 #define TASK_SIZE_MAX TASK_SIZE
11872+
11873+#ifdef CONFIG_PAX_SEGMEXEC
11874+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
58c5fc13
MT
11875+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11876+#else
11877 #define STACK_TOP TASK_SIZE
11878-#define STACK_TOP_MAX STACK_TOP
11879+#endif
ae4e228f 11880+
58c5fc13
MT
11881+#define STACK_TOP_MAX TASK_SIZE
11882
11883 #define INIT_THREAD { \
66a7e928
MT
11884- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11885+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11886 .vm86_info = NULL, \
11887 .sysenter_cs = __KERNEL_CS, \
11888 .io_bitmap_ptr = NULL, \
5e856224 11889@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11890 */
11891 #define INIT_TSS { \
11892 .x86_tss = { \
11893- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11894+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11895 .ss0 = __KERNEL_DS, \
11896 .ss1 = __KERNEL_CS, \
11897 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
5e856224 11898@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11899 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11900
11901 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11902-#define KSTK_TOP(info) \
11903-({ \
11904- unsigned long *__ptr = (unsigned long *)(info); \
11905- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11906-})
71d190be 11907+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
58c5fc13
MT
11908
11909 /*
11910 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
5e856224 11911@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
58c5fc13
MT
11912 #define task_pt_regs(task) \
11913 ({ \
11914 struct pt_regs *__regs__; \
11915- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11916+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11917 __regs__ - 1; \
11918 })
11919
5e856224 11920@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
df50ba0c
MT
11921 /*
11922 * User space process size. 47bits minus one guard page.
11923 */
11924-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11925+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11926
11927 /* This decides where the kernel will search for a free chunk of vm
58c5fc13
MT
11928 * space during mmap's.
11929 */
11930 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11931- 0xc0000000 : 0xFFFFe000)
11932+ 0xc0000000 : 0xFFFFf000)
11933
11934 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11935 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
5e856224 11936@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
66a7e928
MT
11937 #define STACK_TOP_MAX TASK_SIZE_MAX
11938
11939 #define INIT_THREAD { \
11940- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11941+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11942 }
11943
11944 #define INIT_TSS { \
11945- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11946+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11947 }
11948
11949 /*
5e856224 11950@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
58c5fc13
MT
11951 */
11952 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11953
11954+#ifdef CONFIG_PAX_SEGMEXEC
11955+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11956+#endif
11957+
11958 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11959
11960 /* Get/set a process' ability to use the timestamp counter instruction */
fe2de317
MT
11961diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11962index 3566454..4bdfb8c 100644
11963--- a/arch/x86/include/asm/ptrace.h
11964+++ b/arch/x86/include/asm/ptrace.h
11965@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
58c5fc13
MT
11966 }
11967
11968 /*
11969- * user_mode_vm(regs) determines whether a register set came from user mode.
11970+ * user_mode(regs) determines whether a register set came from user mode.
11971 * This is true if V8086 mode was enabled OR if the register set was from
11972 * protected mode with RPL-3 CS value. This tricky test checks that with
11973 * one comparison. Many places in the kernel can bypass this full check
11974- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11975+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11976+ * be used.
11977 */
11978-static inline int user_mode(struct pt_regs *regs)
11979+static inline int user_mode_novm(struct pt_regs *regs)
11980 {
11981 #ifdef CONFIG_X86_32
11982 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11983 #else
11984- return !!(regs->cs & 3);
11985+ return !!(regs->cs & SEGMENT_RPL_MASK);
11986 #endif
11987 }
11988
11989-static inline int user_mode_vm(struct pt_regs *regs)
11990+static inline int user_mode(struct pt_regs *regs)
11991 {
11992 #ifdef CONFIG_X86_32
11993 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11994 USER_RPL;
11995 #else
11996- return user_mode(regs);
11997+ return user_mode_novm(regs);
11998 #endif
11999 }
12000
fe2de317 12001@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
6e9df6a3
MT
12002 #ifdef CONFIG_X86_64
12003 static inline bool user_64bit_mode(struct pt_regs *regs)
12004 {
12005+ unsigned long cs = regs->cs & 0xffff;
12006 #ifndef CONFIG_PARAVIRT
12007 /*
12008 * On non-paravirt systems, this is the only long mode CPL 3
12009 * selector. We do not allow long mode selectors in the LDT.
12010 */
12011- return regs->cs == __USER_CS;
12012+ return cs == __USER_CS;
12013 #else
12014 /* Headers are too twisted for this to go in paravirt.h. */
12015- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
12016+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
12017 #endif
12018 }
12019 #endif
fe2de317 12020diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
4c928ab7 12021index 92f29706..a79cbbb 100644
fe2de317
MT
12022--- a/arch/x86/include/asm/reboot.h
12023+++ b/arch/x86/include/asm/reboot.h
66a7e928
MT
12024@@ -6,19 +6,19 @@
12025 struct pt_regs;
12026
12027 struct machine_ops {
12028- void (*restart)(char *cmd);
12029- void (*halt)(void);
12030- void (*power_off)(void);
12031+ void (* __noreturn restart)(char *cmd);
12032+ void (* __noreturn halt)(void);
12033+ void (* __noreturn power_off)(void);
12034 void (*shutdown)(void);
12035 void (*crash_shutdown)(struct pt_regs *);
12036- void (*emergency_restart)(void);
15a11c5b 12037-};
66a7e928 12038+ void (* __noreturn emergency_restart)(void);
15a11c5b 12039+} __no_const;
66a7e928
MT
12040
12041 extern struct machine_ops machine_ops;
58c5fc13
MT
12042
12043 void native_machine_crash_shutdown(struct pt_regs *regs);
12044 void native_machine_shutdown(void);
66a7e928
MT
12045-void machine_real_restart(unsigned int type);
12046+void machine_real_restart(unsigned int type) __noreturn;
12047 /* These must match dispatch_table in reboot_32.S */
12048 #define MRR_BIOS 0
12049 #define MRR_APM 1
fe2de317 12050diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
4c928ab7 12051index 2dbe4a7..ce1db00 100644
fe2de317
MT
12052--- a/arch/x86/include/asm/rwsem.h
12053+++ b/arch/x86/include/asm/rwsem.h
12054@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
58c5fc13
MT
12055 {
12056 asm volatile("# beginning down_read\n\t"
df50ba0c 12057 LOCK_PREFIX _ASM_INC "(%1)\n\t"
58c5fc13
MT
12058+
12059+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12060+ "jno 0f\n"
df50ba0c 12061+ LOCK_PREFIX _ASM_DEC "(%1)\n"
bc901d79
MT
12062+ "int $4\n0:\n"
12063+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12064+#endif
12065+
6892158b 12066 /* adds 0x00000001 */
bc901d79 12067 " jns 1f\n"
58c5fc13 12068 " call call_rwsem_down_read_failed\n"
fe2de317 12069@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
bc901d79 12070 "1:\n\t"
df50ba0c
MT
12071 " mov %1,%2\n\t"
12072 " add %3,%2\n\t"
58c5fc13
MT
12073+
12074+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12075+ "jno 0f\n"
df50ba0c 12076+ "sub %3,%2\n"
bc901d79
MT
12077+ "int $4\n0:\n"
12078+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12079+#endif
12080+
bc901d79 12081 " jle 2f\n\t"
df50ba0c 12082 LOCK_PREFIX " cmpxchg %2,%0\n\t"
bc901d79 12083 " jnz 1b\n\t"
fe2de317 12084@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
66a7e928 12085 long tmp;
58c5fc13 12086 asm volatile("# beginning down_write\n\t"
df50ba0c 12087 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12088+
12089+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12090+ "jno 0f\n"
df50ba0c 12091+ "mov %1,(%2)\n"
bc901d79
MT
12092+ "int $4\n0:\n"
12093+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12094+#endif
12095+
6892158b 12096 /* adds 0xffff0001, returns the old value */
df50ba0c 12097 " test %1,%1\n\t"
58c5fc13 12098 /* was the count 0 before? */
fe2de317 12099@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
66a7e928 12100 long tmp;
58c5fc13 12101 asm volatile("# beginning __up_read\n\t"
df50ba0c 12102 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12103+
12104+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12105+ "jno 0f\n"
df50ba0c 12106+ "mov %1,(%2)\n"
bc901d79
MT
12107+ "int $4\n0:\n"
12108+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12109+#endif
12110+
12111 /* subtracts 1, returns the old value */
bc901d79 12112 " jns 1f\n\t"
6892158b 12113 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 12114@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
66a7e928 12115 long tmp;
58c5fc13 12116 asm volatile("# beginning __up_write\n\t"
df50ba0c 12117 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12118+
12119+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12120+ "jno 0f\n"
df50ba0c 12121+ "mov %1,(%2)\n"
bc901d79
MT
12122+ "int $4\n0:\n"
12123+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12124+#endif
12125+
6892158b 12126 /* subtracts 0xffff0001, returns the old value */
bc901d79 12127 " jns 1f\n\t"
6892158b 12128 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 12129@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
58c5fc13
MT
12130 {
12131 asm volatile("# beginning __downgrade_write\n\t"
df50ba0c 12132 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
58c5fc13
MT
12133+
12134+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12135+ "jno 0f\n"
df50ba0c 12136+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
bc901d79
MT
12137+ "int $4\n0:\n"
12138+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12139+#endif
12140+
df50ba0c
MT
12141 /*
12142 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12143 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
fe2de317 12144@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
66a7e928
MT
12145 */
12146 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
58c5fc13 12147 {
df50ba0c
MT
12148- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12149+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
58c5fc13
MT
12150+
12151+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12152+ "jno 0f\n"
df50ba0c 12153+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
bc901d79
MT
12154+ "int $4\n0:\n"
12155+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12156+#endif
12157+
12158 : "+m" (sem->count)
df50ba0c 12159 : "er" (delta));
58c5fc13 12160 }
4c928ab7
MT
12161@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12162 */
12163 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
58c5fc13 12164 {
4c928ab7
MT
12165- return delta + xadd(&sem->count, delta);
12166+ return delta + xadd_check_overflow(&sem->count, delta);
12167 }
58c5fc13 12168
4c928ab7 12169 #endif /* __KERNEL__ */
fe2de317
MT
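Every PAX_REFCOUNT addition in the rwsem hunks above (and in the spinlock hunks below) follows one idiom: perform the locked arithmetic, "jno" past the recovery path if the signed result did not overflow, otherwise undo the operation and raise #OF with "int $4" so the overflow handler can report it; the _ASM_EXTABLE entry lets the trap resume at the following instruction. A minimal version of the same idiom on a bare counter (hypothetical sketch; kernel context assumed for LOCK_PREFIX, _ASM_INC/_ASM_DEC and _ASM_EXTABLE):

static inline void counter_inc_checked(long *counter)
{
	asm volatile(LOCK_PREFIX _ASM_INC "%0\n\t"
		     "jno 0f\n\t"
		     LOCK_PREFIX _ASM_DEC "%0\n\t"	/* roll back the overflowing increment */
		     "int $4\n0:\n\t"
		     _ASM_EXTABLE(0b, 0b)
		     : "+m" (*counter));
}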
12170diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12171index 5e64171..f58957e 100644
12172--- a/arch/x86/include/asm/segment.h
12173+++ b/arch/x86/include/asm/segment.h
15a11c5b 12174@@ -64,10 +64,15 @@
ae4e228f
MT
12175 * 26 - ESPFIX small SS
12176 * 27 - per-cpu [ offset to per-cpu data area ]
12177 * 28 - stack_canary-20 [ for stack protector ]
12178- * 29 - unused
12179- * 30 - unused
12180+ * 29 - PCI BIOS CS
12181+ * 30 - PCI BIOS DS
12182 * 31 - TSS for double fault handler
12183 */
15a11c5b
MT
12184+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12185+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12186+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12187+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12188+
ae4e228f 12189 #define GDT_ENTRY_TLS_MIN 6
15a11c5b
MT
12190 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12191
12192@@ -79,6 +84,8 @@
ae4e228f 12193
bc901d79 12194 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
ae4e228f
MT
12195
12196+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12197+
bc901d79 12198 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
58c5fc13 12199
bc901d79 12200 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15a11c5b 12201@@ -104,6 +111,12 @@
58c5fc13
MT
12202 #define __KERNEL_STACK_CANARY 0
12203 #endif
12204
bc901d79 12205+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
58c5fc13
MT
12206+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12207+
bc901d79 12208+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
58c5fc13
MT
12209+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12210+
12211 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12212
12213 /*
15a11c5b 12214@@ -141,7 +154,7 @@
58c5fc13
MT
12215 */
12216
12217 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12218-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12219+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12220
12221
12222 #else
15a11c5b 12223@@ -165,6 +178,8 @@
6e9df6a3 12224 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
ae4e228f
MT
12225 #define __USER32_DS __USER_DS
12226
12227+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12228+
12229 #define GDT_ENTRY_TSS 8 /* needs two entries */
12230 #define GDT_ENTRY_LDT 10 /* needs two entries */
12231 #define GDT_ENTRY_TLS_MIN 12
15a11c5b 12232@@ -185,6 +200,7 @@
ae4e228f
MT
12233 #endif
12234
bc901d79
MT
12235 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12236+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12237 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12238 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12239 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
fe2de317 12240diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
5e856224 12241index 0434c40..1714bf0 100644
fe2de317
MT
12242--- a/arch/x86/include/asm/smp.h
12243+++ b/arch/x86/include/asm/smp.h
12244@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
66a7e928
MT
12245 /* cpus sharing the last level cache: */
12246 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
6892158b
MT
12247 DECLARE_PER_CPU(u16, cpu_llc_id);
12248-DECLARE_PER_CPU(int, cpu_number);
12249+DECLARE_PER_CPU(unsigned int, cpu_number);
12250
12251 static inline struct cpumask *cpu_sibling_mask(int cpu)
12252 {
15a11c5b
MT
12253@@ -77,7 +77,7 @@ struct smp_ops {
12254
12255 void (*send_call_func_ipi)(const struct cpumask *mask);
12256 void (*send_call_func_single_ipi)(int cpu);
12257-};
12258+} __no_const;
12259
12260 /* Globals due to paravirt */
12261 extern void set_cpu_sibling_map(int cpu);
fe2de317 12262@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
71d190be
MT
12263 extern int safe_smp_processor_id(void);
12264
12265 #elif defined(CONFIG_X86_64_SMP)
12266-#define raw_smp_processor_id() (percpu_read(cpu_number))
12267-
12268-#define stack_smp_processor_id() \
12269-({ \
12270- struct thread_info *ti; \
12271- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12272- ti->cpu; \
12273-})
12274+#define raw_smp_processor_id() (percpu_read(cpu_number))
12275+#define stack_smp_processor_id() raw_smp_processor_id()
12276 #define safe_smp_processor_id() smp_processor_id()
12277
12278 #endif
fe2de317 12279diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
5e856224 12280index a82c2bf..2198f61 100644
fe2de317
MT
12281--- a/arch/x86/include/asm/spinlock.h
12282+++ b/arch/x86/include/asm/spinlock.h
5e856224 12283@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
ae4e228f 12284 static inline void arch_read_lock(arch_rwlock_t *rw)
58c5fc13 12285 {
6e9df6a3 12286 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
58c5fc13
MT
12287+
12288+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12289+ "jno 0f\n"
6e9df6a3 12290+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
bc901d79
MT
12291+ "int $4\n0:\n"
12292+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12293+#endif
12294+
bc901d79
MT
12295 "jns 1f\n"
12296 "call __read_lock_failed\n\t"
12297 "1:\n"
5e856224 12298@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
ae4e228f 12299 static inline void arch_write_lock(arch_rwlock_t *rw)
58c5fc13 12300 {
6e9df6a3 12301 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
58c5fc13
MT
12302+
12303+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12304+ "jno 0f\n"
6e9df6a3 12305+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
bc901d79
MT
12306+ "int $4\n0:\n"
12307+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12308+#endif
12309+
bc901d79
MT
12310 "jz 1f\n"
12311 "call __write_lock_failed\n\t"
12312 "1:\n"
5e856224 12313@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
58c5fc13 12314
ae4e228f 12315 static inline void arch_read_unlock(arch_rwlock_t *rw)
58c5fc13 12316 {
6e9df6a3
MT
12317- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12318+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
58c5fc13
MT
12319+
12320+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12321+ "jno 0f\n"
6e9df6a3 12322+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
bc901d79
MT
12323+ "int $4\n0:\n"
12324+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12325+#endif
12326+
6e9df6a3 12327 :"+m" (rw->lock) : : "memory");
58c5fc13
MT
12328 }
12329
ae4e228f 12330 static inline void arch_write_unlock(arch_rwlock_t *rw)
58c5fc13 12331 {
6e9df6a3
MT
12332- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12333+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
58c5fc13
MT
12334+
12335+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12336+ "jno 0f\n"
6e9df6a3 12337+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
58c5fc13 12338+ "int $4\n0:\n"
bc901d79 12339+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12340+#endif
12341+
6e9df6a3 12342 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
58c5fc13
MT
12343 }
12344
fe2de317
MT
12345diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12346index 1575177..cb23f52 100644
12347--- a/arch/x86/include/asm/stackprotector.h
12348+++ b/arch/x86/include/asm/stackprotector.h
15a11c5b
MT
12349@@ -48,7 +48,7 @@
12350 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12351 */
12352 #define GDT_STACK_CANARY_INIT \
12353- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12354+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12355
12356 /*
12357 * Initialize the stackprotector canary value.
fe2de317 12358@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
bc901d79
MT
12359
12360 static inline void load_stack_canary_segment(void)
12361 {
12362-#ifdef CONFIG_X86_32
12363+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12364 asm volatile ("mov %0, %%gs" : : "r" (0));
12365 #endif
12366 }
fe2de317
MT
12367diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12368index 70bbe39..4ae2bd4 100644
12369--- a/arch/x86/include/asm/stacktrace.h
12370+++ b/arch/x86/include/asm/stacktrace.h
71d190be
MT
12371@@ -11,28 +11,20 @@
12372
12373 extern int kstack_depth_to_print;
12374
12375-struct thread_info;
12376+struct task_struct;
12377 struct stacktrace_ops;
12378
12379-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12380- unsigned long *stack,
12381- unsigned long bp,
12382- const struct stacktrace_ops *ops,
12383- void *data,
12384- unsigned long *end,
12385- int *graph);
fe2de317
MT
12386+typedef unsigned long walk_stack_t(struct task_struct *task,
12387+ void *stack_start,
12388+ unsigned long *stack,
12389+ unsigned long bp,
12390+ const struct stacktrace_ops *ops,
12391+ void *data,
12392+ unsigned long *end,
12393+ int *graph);
12394
71d190be
MT
12395-extern unsigned long
12396-print_context_stack(struct thread_info *tinfo,
12397- unsigned long *stack, unsigned long bp,
12398- const struct stacktrace_ops *ops, void *data,
12399- unsigned long *end, int *graph);
12400-
12401-extern unsigned long
12402-print_context_stack_bp(struct thread_info *tinfo,
12403- unsigned long *stack, unsigned long bp,
12404- const struct stacktrace_ops *ops, void *data,
12405- unsigned long *end, int *graph);
71d190be
MT
12406+extern walk_stack_t print_context_stack;
12407+extern walk_stack_t print_context_stack_bp;
12408
12409 /* Generic stack tracer with callbacks */
12410
15a11c5b 12411@@ -40,7 +32,7 @@ struct stacktrace_ops {
71d190be
MT
12412 void (*address)(void *data, unsigned long address, int reliable);
12413 /* On negative return stop dumping */
12414 int (*stack)(void *data, char *name);
12415- walk_stack_t walk_stack;
12416+ walk_stack_t *walk_stack;
12417 };
12418
12419 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
fe2de317
MT
12420diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12421index cb23852..2dde194 100644
12422--- a/arch/x86/include/asm/sys_ia32.h
12423+++ b/arch/x86/include/asm/sys_ia32.h
12424@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
6e9df6a3
MT
12425 compat_sigset_t __user *, unsigned int);
12426 asmlinkage long sys32_alarm(unsigned int);
12427
12428-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12429+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12430 asmlinkage long sys32_sysfs(int, u32, u32);
12431
12432 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
4c928ab7
MT
12433diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
12434index f1d8b44..a4de8b7 100644
12435--- a/arch/x86/include/asm/syscalls.h
12436+++ b/arch/x86/include/asm/syscalls.h
12437@@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *,
12438 void __user *, struct pt_regs *);
12439
12440 /* kernel/ldt.c */
12441-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
12442+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
12443
12444 /* kernel/signal.c */
12445 long sys_rt_sigreturn(struct pt_regs *);
fe2de317 12446diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
4c928ab7 12447index 2d2f01c..f985723 100644
fe2de317
MT
12448--- a/arch/x86/include/asm/system.h
12449+++ b/arch/x86/include/asm/system.h
66a7e928 12450@@ -129,7 +129,7 @@ do { \
71d190be
MT
12451 "call __switch_to\n\t" \
12452 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12453 __switch_canary \
12454- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12455+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12456 "movq %%rax,%%rdi\n\t" \
12457 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12458 "jnz ret_from_fork\n\t" \
66a7e928 12459@@ -140,7 +140,7 @@ do { \
71d190be
MT
12460 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12461 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12462 [_tif_fork] "i" (_TIF_FORK), \
12463- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12464+ [thread_info] "m" (current_tinfo), \
12465 [current_task] "m" (current_task) \
12466 __switch_canary_iparam \
12467 : "memory", "cc" __EXTRA_CLOBBER)
fe2de317 12468@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
58c5fc13
MT
12469 {
12470 unsigned long __limit;
12471 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12472- return __limit + 1;
12473+ return __limit;
12474 }
12475
12476 static inline void native_clts(void)
4c928ab7 12477@@ -397,13 +397,13 @@ void enable_hlt(void);
58c5fc13
MT
12478
12479 void cpu_idle_wait(void);
12480
12481-extern unsigned long arch_align_stack(unsigned long sp);
12482+#define arch_align_stack(x) ((x) & ~0xfUL)
12483 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12484
12485 void default_idle(void);
4c928ab7 12486 bool set_pm_idle_to_default(void);
66a7e928
MT
12487
12488-void stop_this_cpu(void *dummy);
12489+void stop_this_cpu(void *dummy) __noreturn;
12490
12491 /*
12492 * Force strict CPU ordering.
fe2de317 12493diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
5e856224 12494index cfd8144..664ac89 100644
fe2de317
MT
12495--- a/arch/x86/include/asm/thread_info.h
12496+++ b/arch/x86/include/asm/thread_info.h
71d190be
MT
12497@@ -10,6 +10,7 @@
12498 #include <linux/compiler.h>
12499 #include <asm/page.h>
12500 #include <asm/types.h>
12501+#include <asm/percpu.h>
12502
12503 /*
12504 * low level task data that entry.S needs immediate access to
12505@@ -24,7 +25,6 @@ struct exec_domain;
6e9df6a3 12506 #include <linux/atomic.h>
71d190be
MT
12507
12508 struct thread_info {
12509- struct task_struct *task; /* main task structure */
12510 struct exec_domain *exec_domain; /* execution domain */
12511 __u32 flags; /* low level flags */
12512 __u32 status; /* thread synchronous flags */
5e856224 12513@@ -34,19 +34,13 @@ struct thread_info {
71d190be
MT
12514 mm_segment_t addr_limit;
12515 struct restart_block restart_block;
12516 void __user *sysenter_return;
12517-#ifdef CONFIG_X86_32
12518- unsigned long previous_esp; /* ESP of the previous stack in
12519- case of nested (IRQ) stacks
12520- */
12521- __u8 supervisor_stack[0];
12522-#endif
66a7e928 12523+ unsigned long lowest_stack;
5e856224
MT
12524 unsigned int sig_on_uaccess_error:1;
12525 unsigned int uaccess_err:1; /* uaccess failed */
71d190be
MT
12526 };
12527
12528-#define INIT_THREAD_INFO(tsk) \
12529+#define INIT_THREAD_INFO \
12530 { \
12531- .task = &tsk, \
12532 .exec_domain = &default_exec_domain, \
12533 .flags = 0, \
12534 .cpu = 0, \
5e856224 12535@@ -57,7 +51,7 @@ struct thread_info {
71d190be
MT
12536 }, \
12537 }
12538
12539-#define init_thread_info (init_thread_union.thread_info)
66a7e928 12540+#define init_thread_info (init_thread_union.stack)
71d190be
MT
12541 #define init_stack (init_thread_union.stack)
12542
12543 #else /* !__ASSEMBLY__ */
5e856224
MT
12544@@ -95,6 +89,7 @@ struct thread_info {
12545 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
12546 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
12547 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12548+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
12549
12550 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12551 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12552@@ -116,16 +111,17 @@ struct thread_info {
12553 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
12554 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
12555 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12556+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12557
12558 /* work to do in syscall_trace_enter() */
12559 #define _TIF_WORK_SYSCALL_ENTRY \
12560 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12561- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12562+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12563
12564 /* work to do in syscall_trace_leave() */
12565 #define _TIF_WORK_SYSCALL_EXIT \
12566 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12567- _TIF_SYSCALL_TRACEPOINT)
12568+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12569
12570 /* work to do on interrupt/exception return */
12571 #define _TIF_WORK_MASK \
12572@@ -135,7 +131,8 @@ struct thread_info {
12573
12574 /* work to do on any return to user space */
12575 #define _TIF_ALLWORK_MASK \
12576- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12577+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12578+ _TIF_GRSEC_SETXID)
12579
12580 /* Only used for 64 bit */
12581 #define _TIF_DO_NOTIFY_MASK \
12582@@ -169,45 +166,40 @@ struct thread_info {
12583 ret; \
12584 })
71d190be 12585
12586-#ifdef CONFIG_X86_32
12587-
12588-#define STACK_WARN (THREAD_SIZE/8)
12589-/*
12590- * macros/functions for gaining access to the thread information structure
12591- *
12592- * preempt_count needs to be 1 initially, until the scheduler is functional.
12593- */
12594-#ifndef __ASSEMBLY__
12595-
12596-
12597-/* how to get the current stack pointer from C */
12598-register unsigned long current_stack_pointer asm("esp") __used;
71d190be 12599-
12600-/* how to get the thread information struct from C */
12601-static inline struct thread_info *current_thread_info(void)
12602-{
12603- return (struct thread_info *)
12604- (current_stack_pointer & ~(THREAD_SIZE - 1));
12605-}
12606-
12607-#else /* !__ASSEMBLY__ */
12608-
12609+#ifdef __ASSEMBLY__
12610 /* how to get the thread information struct from ASM */
12611 #define GET_THREAD_INFO(reg) \
12612- movl $-THREAD_SIZE, reg; \
12613- andl %esp, reg
12614+ mov PER_CPU_VAR(current_tinfo), reg
12615
12616 /* use this one if reg already contains %esp */
12617-#define GET_THREAD_INFO_WITH_ESP(reg) \
12618- andl $-THREAD_SIZE, reg
12619+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12620+#else
12621+/* how to get the thread information struct from C */
12622+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12623+
12624+static __always_inline struct thread_info *current_thread_info(void)
12625+{
12626+ return percpu_read_stable(current_tinfo);
12627+}
12628+#endif
12629+
12630+#ifdef CONFIG_X86_32
12631+
12632+#define STACK_WARN (THREAD_SIZE/8)
12633+/*
12634+ * macros/functions for gaining access to the thread information structure
12635+ *
12636+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12637+ */
12638+#ifndef __ASSEMBLY__
12639+
12640+/* how to get the current stack pointer from C */
12641+register unsigned long current_stack_pointer asm("esp") __used;
12642
12643 #endif
12644
12645 #else /* X86_32 */
12646
12647-#include <asm/percpu.h>
12648-#define KERNEL_STACK_OFFSET (5*8)
12649-
12650 /*
12651 * macros/functions for gaining access to the thread information structure
12652 * preempt_count needs to be 1 initially, until the scheduler is functional.
5e856224 12653@@ -215,27 +207,8 @@ static inline struct thread_info *current_thread_info(void)
12654 #ifndef __ASSEMBLY__
12655 DECLARE_PER_CPU(unsigned long, kernel_stack);
12656
12657-static inline struct thread_info *current_thread_info(void)
12658-{
12659- struct thread_info *ti;
12660- ti = (void *)(percpu_read_stable(kernel_stack) +
12661- KERNEL_STACK_OFFSET - THREAD_SIZE);
12662- return ti;
12663-}
12664-
12665-#else /* !__ASSEMBLY__ */
12666-
12667-/* how to get the thread information struct from ASM */
12668-#define GET_THREAD_INFO(reg) \
12669- movq PER_CPU_VAR(kernel_stack),reg ; \
12670- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12671-
12672-/*
12673- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12674- * a certain register (to be used in assembler memory operands).
12675- */
12676-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12677-
12678+/* how to get the current stack pointer from C */
12679+register unsigned long current_stack_pointer asm("rsp") __used;
12680 #endif
12681
12682 #endif /* !X86_32 */
5e856224 12683@@ -269,5 +242,16 @@ extern void arch_task_cache_init(void);
12684 extern void free_thread_info(struct thread_info *ti);
12685 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12686 #define arch_task_cache_init arch_task_cache_init
12687+
12688+#define __HAVE_THREAD_FUNCTIONS
12689+#define task_thread_info(task) (&(task)->tinfo)
12690+#define task_stack_page(task) ((task)->stack)
12691+#define setup_thread_stack(p, org) do {} while (0)
12692+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12693+
12694+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
66a7e928 12695+extern struct task_struct *alloc_task_struct_node(int node);
12696+extern void free_task_struct(struct task_struct *);
12697+
12698 #endif
12699 #endif /* _ASM_X86_THREAD_INFO_H */
fe2de317 12700diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
5e856224 12701index 8be5f54..7ae826d 100644
12702--- a/arch/x86/include/asm/uaccess.h
12703+++ b/arch/x86/include/asm/uaccess.h
12704@@ -7,12 +7,15 @@
12705 #include <linux/compiler.h>
12706 #include <linux/thread_info.h>
12707 #include <linux/string.h>
12708+#include <linux/sched.h>
12709 #include <asm/asm.h>
12710 #include <asm/page.h>
12711
12712 #define VERIFY_READ 0
12713 #define VERIFY_WRITE 1
12714
12715+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12716+
12717 /*
12718 * The fs value determines whether argument validity checking should be
12719 * performed or not. If get_fs() == USER_DS, checking is performed, with
12720@@ -28,7 +31,12 @@
12721
12722 #define get_ds() (KERNEL_DS)
12723 #define get_fs() (current_thread_info()->addr_limit)
12724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12725+void __set_fs(mm_segment_t x);
12726+void set_fs(mm_segment_t x);
12727+#else
12728 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12729+#endif
12730
12731 #define segment_eq(a, b) ((a).seg == (b).seg)
12732
12733@@ -76,7 +84,33 @@
12734 * checks that the pointer is in the user space range - after calling
12735 * this function, memory access functions may still return -EFAULT.
12736 */
12737-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12738+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12739+#define access_ok(type, addr, size) \
12740+({ \
12741+ long __size = size; \
12742+ unsigned long __addr = (unsigned long)addr; \
12743+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12744+ unsigned long __end_ao = __addr + __size - 1; \
12745+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12746+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12747+ while(__addr_ao <= __end_ao) { \
12748+ char __c_ao; \
12749+ __addr_ao += PAGE_SIZE; \
12750+ if (__size > PAGE_SIZE) \
12751+ cond_resched(); \
12752+ if (__get_user(__c_ao, (char __user *)__addr)) \
12753+ break; \
12754+ if (type != VERIFY_WRITE) { \
12755+ __addr = __addr_ao; \
12756+ continue; \
12757+ } \
12758+ if (__put_user(__c_ao, (char __user *)__addr)) \
12759+ break; \
12760+ __addr = __addr_ao; \
12761+ } \
12762+ } \
12763+ __ret_ao; \
12764+})
12765
12766 /*
12767 * The exception table consists of pairs of addresses: the first is the
12768@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12769 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12770 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12771
12772-
12773+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12774+#define __copyuser_seg "gs;"
12775+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12776+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12777+#else
12778+#define __copyuser_seg
12779+#define __COPYUSER_SET_ES
12780+#define __COPYUSER_RESTORE_ES
12781+#endif
12782
12783 #ifdef CONFIG_X86_32
12784 #define __put_user_asm_u64(x, addr, err, errret) \
12785- asm volatile("1: movl %%eax,0(%2)\n" \
12786- "2: movl %%edx,4(%2)\n" \
12787+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12788+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12789 "3:\n" \
12790 ".section .fixup,\"ax\"\n" \
12791 "4: movl %3,%0\n" \
12792@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12793 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12794
12795 #define __put_user_asm_ex_u64(x, addr) \
12796- asm volatile("1: movl %%eax,0(%1)\n" \
12797- "2: movl %%edx,4(%1)\n" \
12798+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12799+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12800 "3:\n" \
12801 _ASM_EXTABLE(1b, 2b - 1b) \
12802 _ASM_EXTABLE(2b, 3b - 2b) \
12803@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12804 __typeof__(*(ptr)) __pu_val; \
12805 __chk_user_ptr(ptr); \
12806 might_fault(); \
12807- __pu_val = x; \
12808+ __pu_val = (x); \
12809 switch (sizeof(*(ptr))) { \
12810 case 1: \
12811 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12812@@ -373,7 +415,7 @@ do { \
12813 } while (0)
12814
12815 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12816- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12817+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12818 "2:\n" \
12819 ".section .fixup,\"ax\"\n" \
12820 "3: mov %3,%0\n" \
12821@@ -381,7 +423,7 @@ do { \
12822 " jmp 2b\n" \
12823 ".previous\n" \
12824 _ASM_EXTABLE(1b, 3b) \
12825- : "=r" (err), ltype(x) \
12826+ : "=r" (err), ltype (x) \
12827 : "m" (__m(addr)), "i" (errret), "0" (err))
12828
12829 #define __get_user_size_ex(x, ptr, size) \
12830@@ -406,7 +448,7 @@ do { \
12831 } while (0)
12832
12833 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12834- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12835+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12836 "2:\n" \
12837 _ASM_EXTABLE(1b, 2b - 1b) \
12838 : ltype(x) : "m" (__m(addr)))
12839@@ -423,13 +465,24 @@ do { \
12840 int __gu_err; \
12841 unsigned long __gu_val; \
12842 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12843- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12844+ (x) = (__typeof__(*(ptr)))__gu_val; \
12845 __gu_err; \
12846 })
12847
12848 /* FIXME: this hack is definitely wrong -AK */
12849 struct __large_struct { unsigned long buf[100]; };
12850-#define __m(x) (*(struct __large_struct __user *)(x))
12851+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12852+#define ____m(x) \
12853+({ \
12854+ unsigned long ____x = (unsigned long)(x); \
12855+ if (____x < PAX_USER_SHADOW_BASE) \
12856+ ____x += PAX_USER_SHADOW_BASE; \
12857+ (void __user *)____x; \
12858+})
12859+#else
12860+#define ____m(x) (x)
12861+#endif
12862+#define __m(x) (*(struct __large_struct __user *)____m(x))
12863
12864 /*
12865 * Tell gcc we read from memory instead of writing: this is because
12866@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12867 * aliasing issues.
12868 */
12869 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12870- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12871+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12872 "2:\n" \
12873 ".section .fixup,\"ax\"\n" \
12874 "3: mov %3,%0\n" \
12875@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12876 ".previous\n" \
12877 _ASM_EXTABLE(1b, 3b) \
12878 : "=r"(err) \
12879- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12880+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12881
12882 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12883- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12884+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12885 "2:\n" \
12886 _ASM_EXTABLE(1b, 2b - 1b) \
12887 : : ltype(x), "m" (__m(addr)))
12888@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12889 * On error, the variable @x is set to zero.
12890 */
12891
12892+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12893+#define __get_user(x, ptr) get_user((x), (ptr))
12894+#else
12895 #define __get_user(x, ptr) \
12896 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12897+#endif
12898
12899 /**
12900 * __put_user: - Write a simple value into user space, with less checking.
12901@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12902 * Returns zero on success, or -EFAULT on error.
12903 */
12904
12905+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12906+#define __put_user(x, ptr) put_user((x), (ptr))
12907+#else
12908 #define __put_user(x, ptr) \
12909 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12910+#endif
12911
12912 #define __get_user_unaligned __get_user
12913 #define __put_user_unaligned __put_user
12914@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12915 #define get_user_ex(x, ptr) do { \
12916 unsigned long __gue_val; \
12917 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12918- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12919+ (x) = (__typeof__(*(ptr)))__gue_val; \
12920 } while (0)
12921
12922 #ifdef CONFIG_X86_WP_WORKS_OK
12923diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
4c928ab7 12924index 566e803..7183d0b 100644
12925--- a/arch/x86/include/asm/uaccess_32.h
12926+++ b/arch/x86/include/asm/uaccess_32.h
12927@@ -11,15 +11,15 @@
12928 #include <asm/page.h>
12929
12930 unsigned long __must_check __copy_to_user_ll
12931- (void __user *to, const void *from, unsigned long n);
12932+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12933 unsigned long __must_check __copy_from_user_ll
12934- (void *to, const void __user *from, unsigned long n);
12935+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12936 unsigned long __must_check __copy_from_user_ll_nozero
12937- (void *to, const void __user *from, unsigned long n);
12938+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12939 unsigned long __must_check __copy_from_user_ll_nocache
12940- (void *to, const void __user *from, unsigned long n);
12941+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12942 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12943- (void *to, const void __user *from, unsigned long n);
12944+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12945
12946 /**
12947 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12948@@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12949 */
12950
58c5fc13 12951 static __always_inline unsigned long __must_check
12952+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12953+static __always_inline unsigned long __must_check
12954 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12955 {
12956+ if ((long)n < 0)
12957+ return n;
12958+
12959 if (__builtin_constant_p(n)) {
12960 unsigned long ret;
12961
fe2de317 12962@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12963 return ret;
12964 }
12965 }
12966+ if (!__builtin_constant_p(n))
12967+ check_object_size(from, n, true);
12968 return __copy_to_user_ll(to, from, n);
12969 }
12970
12971@@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12972 * On success, this will be zero.
12973 */
12974 static __always_inline unsigned long __must_check
12975+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12976+static __always_inline unsigned long __must_check
12977 __copy_to_user(void __user *to, const void *from, unsigned long n)
12978 {
12979 might_fault();
12980+
12981 return __copy_to_user_inatomic(to, from, n);
12982 }
12983
58c5fc13 12984 static __always_inline unsigned long
12985+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12986+static __always_inline unsigned long
12987 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12988 {
12989+ if ((long)n < 0)
12990+ return n;
12991+
12992 /* Avoid zeroing the tail if the copy fails..
12993 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12994 * but as the zeroing behaviour is only significant when n is not
12995@@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12996 * for explanation of why this is needed.
12997 */
12998 static __always_inline unsigned long
12999+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13000+static __always_inline unsigned long
13001 __copy_from_user(void *to, const void __user *from, unsigned long n)
13002 {
13003 might_fault();
13004+
13005+ if ((long)n < 0)
13006+ return n;
13007+
13008 if (__builtin_constant_p(n)) {
13009 unsigned long ret;
13010
4c928ab7 13011@@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13012 return ret;
13013 }
13014 }
13015+ if (!__builtin_constant_p(n))
13016+ check_object_size(to, n, false);
13017 return __copy_from_user_ll(to, from, n);
13018 }
13019
13020 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13021+ const void __user *from, unsigned long n) __size_overflow(3);
13022+static __always_inline unsigned long __copy_from_user_nocache(void *to,
13023 const void __user *from, unsigned long n)
13024 {
13025 might_fault();
13026+
13027+ if ((long)n < 0)
13028+ return n;
13029+
13030 if (__builtin_constant_p(n)) {
13031 unsigned long ret;
13032
13033@@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13034
13035 static __always_inline unsigned long
58c5fc13 13036 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13037+ unsigned long n) __size_overflow(3);
13038+static __always_inline unsigned long
13039+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13040 unsigned long n)
13041 {
13042- return __copy_from_user_ll_nocache_nozero(to, from, n);
13043+ if ((long)n < 0)
13044+ return n;
13045+
13046+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13047 }
13048
13049-unsigned long __must_check copy_to_user(void __user *to,
13050- const void *from, unsigned long n);
13051-unsigned long __must_check _copy_from_user(void *to,
13052- const void __user *from,
13053- unsigned long n);
fe2de317 13054-
13055+extern void copy_to_user_overflow(void)
13056+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13057+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13058+#else
13059+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13060+#endif
13061+;
13062
13063 extern void copy_from_user_overflow(void)
13064 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
4c928ab7 13065@@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void)
13066 #endif
13067 ;
13068
13069-static inline unsigned long __must_check copy_from_user(void *to,
13070- const void __user *from,
13071- unsigned long n)
13072+/**
13073+ * copy_to_user: - Copy a block of data into user space.
13074+ * @to: Destination address, in user space.
13075+ * @from: Source address, in kernel space.
13076+ * @n: Number of bytes to copy.
13077+ *
13078+ * Context: User context only. This function may sleep.
13079+ *
13080+ * Copy data from kernel space to user space.
13081+ *
13082+ * Returns number of bytes that could not be copied.
13083+ * On success, this will be zero.
13084+ */
ae4e228f 13085+static inline unsigned long __must_check
13086+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13087+static inline unsigned long __must_check
13088+copy_to_user(void __user *to, const void *from, unsigned long n)
13089+{
13090+ int sz = __compiletime_object_size(from);
13091+
13092+ if (unlikely(sz != -1 && sz < n))
13093+ copy_to_user_overflow();
13094+ else if (access_ok(VERIFY_WRITE, to, n))
13095+ n = __copy_to_user(to, from, n);
13096+ return n;
13097+}
13098+
13099+/**
13100+ * copy_from_user: - Copy a block of data from user space.
13101+ * @to: Destination address, in kernel space.
13102+ * @from: Source address, in user space.
13103+ * @n: Number of bytes to copy.
13104+ *
13105+ * Context: User context only. This function may sleep.
13106+ *
13107+ * Copy data from user space to kernel space.
13108+ *
13109+ * Returns number of bytes that could not be copied.
13110+ * On success, this will be zero.
13111+ *
13112+ * If some data could not be copied, this function will pad the copied
13113+ * data to the requested size using zero bytes.
13114+ */
ae4e228f 13115+static inline unsigned long __must_check
13116+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13117+static inline unsigned long __must_check
58c5fc13 13118+copy_from_user(void *to, const void __user *from, unsigned long n)
13119 {
13120 int sz = __compiletime_object_size(to);
13121
13122- if (likely(sz == -1 || sz >= n))
13123- n = _copy_from_user(to, from, n);
13124- else
13125+ if (unlikely(sz != -1 && sz < n))
13126 copy_from_user_overflow();
13127-
13128+ else if (access_ok(VERIFY_READ, from, n))
13129+ n = __copy_from_user(to, from, n);
13130+ else if ((long)n > 0) {
13131+ if (!__builtin_constant_p(n))
13132+ check_object_size(to, n, false);
13133+ memset(to, 0, n);
13134+ }
ae4e228f 13135 return n;
13136 }
13137
13138@@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst,
13139 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13140
13141 long strnlen_user(const char __user *str, long n);
13142-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13143-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13144+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13145+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13146
13147 #endif /* _ASM_X86_UACCESS_32_H */
fe2de317 13148diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
4c928ab7 13149index 1c66d30..e294b5f 100644
13150--- a/arch/x86/include/asm/uaccess_64.h
13151+++ b/arch/x86/include/asm/uaccess_64.h
15a11c5b 13152@@ -10,6 +10,9 @@
13153 #include <asm/alternative.h>
13154 #include <asm/cpufeature.h>
58c5fc13 13155 #include <asm/page.h>
13156+#include <asm/pgtable.h>
13157+
58c5fc13 13158+#define set_fs(x) (current_thread_info()->addr_limit = (x))
df50ba0c 13159
13160 /*
13161 * Copy To/From Userspace
4c928ab7 13162@@ -17,12 +20,14 @@
13163
13164 /* Handles exceptions in both to and from, but doesn't do access_ok */
13165 __must_check unsigned long
13166-copy_user_generic_string(void *to, const void *from, unsigned len);
4c928ab7 13167+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13168 __must_check unsigned long
13169-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
4c928ab7 13170+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13171
13172 static __always_inline __must_check unsigned long
13173-copy_user_generic(void *to, const void *from, unsigned len)
13174+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13175+static __always_inline __must_check unsigned long
13176+copy_user_generic(void *to, const void *from, unsigned long len)
13177 {
13178 unsigned ret;
13179
13180@@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len)
13181 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13182 "=d" (len)),
13183 "1" (to), "2" (from), "3" (len)
13184- : "memory", "rcx", "r8", "r9", "r10", "r11");
13185+ : "memory", "rcx", "r8", "r9", "r11");
13186 return ret;
13187 }
13188
ae4e228f 13189+static __always_inline __must_check unsigned long
4c928ab7 13190+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
ae4e228f 13191+static __always_inline __must_check unsigned long
4c928ab7 13192+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
ae4e228f 13193 __must_check unsigned long
13194-_copy_to_user(void __user *to, const void *from, unsigned len);
13195-__must_check unsigned long
13196-_copy_from_user(void *to, const void __user *from, unsigned len);
13197-__must_check unsigned long
13198-copy_in_user(void __user *to, const void __user *from, unsigned len);
4c928ab7 13199+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
58c5fc13 13200
13201 static inline unsigned long __must_check copy_from_user(void *to,
13202 const void __user *from,
13203+ unsigned long n) __size_overflow(3);
13204+static inline unsigned long __must_check copy_from_user(void *to,
13205+ const void __user *from,
13206 unsigned long n)
13207 {
13208- int sz = __compiletime_object_size(to);
13209-
13210 might_fault();
13211- if (likely(sz == -1 || sz >= n))
13212- n = _copy_from_user(to, from, n);
13213-#ifdef CONFIG_DEBUG_VM
13214- else
13215- WARN(1, "Buffer overflow detected!\n");
13216-#endif
13217+
13218+ if (access_ok(VERIFY_READ, from, n))
13219+ n = __copy_from_user(to, from, n);
fe2de317 13220+ else if (n < INT_MAX) {
13221+ if (!__builtin_constant_p(n))
13222+ check_object_size(to, n, false);
13223+ memset(to, 0, n);
13224+ }
13225 return n;
13226 }
13227
13228 static __always_inline __must_check
13229-int copy_to_user(void __user *dst, const void *src, unsigned size)
13230+int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13231+static __always_inline __must_check
fe2de317 13232+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13233 {
13234 might_fault();
13235
13236- return _copy_to_user(dst, src, size);
13237+ if (access_ok(VERIFY_WRITE, dst, size))
13238+ size = __copy_to_user(dst, src, size);
13239+ return size;
13240 }
13241
13242 static __always_inline __must_check
13243-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13244+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13245+static __always_inline __must_check
fe2de317 13246+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13247 {
13248- int ret = 0;
ae4e228f 13249+ int sz = __compiletime_object_size(dst);
13250+ unsigned ret = 0;
13251
13252 might_fault();
13253- if (!__builtin_constant_p(size))
bc901d79 13254- return copy_user_generic(dst, (__force void *)src, size);
58c5fc13 13255+
fe2de317 13256+ if (size > INT_MAX)
58c5fc13
MT
13257+ return size;
13258+
bc901d79
MT
13259+#ifdef CONFIG_PAX_MEMORY_UDEREF
13260+ if (!__access_ok(VERIFY_READ, src, size))
13261+ return size;
13262+#endif
13263+
13264+ if (unlikely(sz != -1 && sz < size)) {
13265+#ifdef CONFIG_DEBUG_VM
13266+ WARN(1, "Buffer overflow detected!\n");
13267+#endif
13268+ return size;
13269+ }
13270+
13271+ if (!__builtin_constant_p(size)) {
13272+ check_object_size(dst, size, false);
13273+
13274+#ifdef CONFIG_PAX_MEMORY_UDEREF
13275+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13276+ src += PAX_USER_SHADOW_BASE;
13277+#endif
13278+
6e9df6a3 13279+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13280+ }
13281 switch (size) {
13282- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13283+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
58c5fc13 13284 ret, "b", "b", "=q", 1);
13285 return ret;
13286- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13287+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13288 ret, "w", "w", "=r", 2);
13289 return ret;
13290- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13291+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13292 ret, "l", "k", "=r", 4);
13293 return ret;
13294- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13295+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13296 ret, "q", "", "=r", 8);
13297 return ret;
13298 case 10:
13299- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13300+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13301 ret, "q", "", "=r", 10);
13302 if (unlikely(ret))
13303 return ret;
13304 __get_user_asm(*(u16 *)(8 + (char *)dst),
13305- (u16 __user *)(8 + (char __user *)src),
13306+ (const u16 __user *)(8 + (const char __user *)src),
13307 ret, "w", "w", "=r", 2);
13308 return ret;
13309 case 16:
13310- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13311+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13312 ret, "q", "", "=r", 16);
13313 if (unlikely(ret))
13314 return ret;
13315 __get_user_asm(*(u64 *)(8 + (char *)dst),
13316- (u64 __user *)(8 + (char __user *)src),
13317+ (const u64 __user *)(8 + (const char __user *)src),
13318 ret, "q", "", "=r", 8);
13319 return ret;
13320 default:
bc901d79 13321- return copy_user_generic(dst, (__force void *)src, size);
13322+
13323+#ifdef CONFIG_PAX_MEMORY_UDEREF
13324+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13325+ src += PAX_USER_SHADOW_BASE;
13326+#endif
13327+
6e9df6a3 13328+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
df50ba0c 13329 }
13330 }
13331
13332 static __always_inline __must_check
13333-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13334+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13335+static __always_inline __must_check
fe2de317 13336+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13337 {
13338- int ret = 0;
ae4e228f 13339+ int sz = __compiletime_object_size(src);
13340+ unsigned ret = 0;
13341
13342 might_fault();
13343- if (!__builtin_constant_p(size))
6e9df6a3 13344- return copy_user_generic((__force void *)dst, src, size);
58c5fc13 13345+
fe2de317 13346+ if (size > INT_MAX)
13347+ return size;
13348+
13349+#ifdef CONFIG_PAX_MEMORY_UDEREF
13350+ if (!__access_ok(VERIFY_WRITE, dst, size))
13351+ return size;
13352+#endif
13353+
13354+ if (unlikely(sz != -1 && sz < size)) {
13355+#ifdef CONFIG_DEBUG_VM
13356+ WARN(1, "Buffer overflow detected!\n");
13357+#endif
13358+ return size;
13359+ }
13360+
13361+ if (!__builtin_constant_p(size)) {
13362+ check_object_size(src, size, true);
13363+
13364+#ifdef CONFIG_PAX_MEMORY_UDEREF
13365+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13366+ dst += PAX_USER_SHADOW_BASE;
13367+#endif
13368+
6e9df6a3 13369+ return copy_user_generic((__force_kernel void *)dst, src, size);
13370+ }
13371 switch (size) {
13372- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13373+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
58c5fc13 13374 ret, "b", "b", "iq", 1);
bc901d79
MT
13375 return ret;
13376- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13377+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13378 ret, "w", "w", "ir", 2);
13379 return ret;
13380- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13381+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13382 ret, "l", "k", "ir", 4);
13383 return ret;
13384- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13385+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13386 ret, "q", "", "er", 8);
13387 return ret;
13388 case 10:
13389- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13390+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13391 ret, "q", "", "er", 10);
13392 if (unlikely(ret))
13393 return ret;
13394 asm("":::"memory");
13395- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13396+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13397 ret, "w", "w", "ir", 2);
13398 return ret;
13399 case 16:
13400- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13401+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13402 ret, "q", "", "er", 16);
13403 if (unlikely(ret))
13404 return ret;
13405 asm("":::"memory");
13406- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13407+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13408 ret, "q", "", "er", 8);
13409 return ret;
13410 default:
6e9df6a3 13411- return copy_user_generic((__force void *)dst, src, size);
13412+
13413+#ifdef CONFIG_PAX_MEMORY_UDEREF
13414+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13415+ dst += PAX_USER_SHADOW_BASE;
13416+#endif
13417+
6e9df6a3 13418+ return copy_user_generic((__force_kernel void *)dst, src, size);
df50ba0c 13419 }
13420 }
13421
13422 static __always_inline __must_check
13423-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13424+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13425+static __always_inline __must_check
fe2de317 13426+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13427 {
13428- int ret = 0;
13429+ unsigned ret = 0;
13430
13431 might_fault();
df50ba0c 13432- if (!__builtin_constant_p(size))
13433- return copy_user_generic((__force void *)dst,
13434- (__force void *)src, size);
58c5fc13 13435+
fe2de317 13436+ if (size > INT_MAX)
13437+ return size;
13438+
13439+#ifdef CONFIG_PAX_MEMORY_UDEREF
13440+ if (!__access_ok(VERIFY_READ, src, size))
13441+ return size;
13442+ if (!__access_ok(VERIFY_WRITE, dst, size))
13443+ return size;
13444+#endif
13445+
df50ba0c 13446+ if (!__builtin_constant_p(size)) {
13447+
13448+#ifdef CONFIG_PAX_MEMORY_UDEREF
13449+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13450+ src += PAX_USER_SHADOW_BASE;
13451+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13452+ dst += PAX_USER_SHADOW_BASE;
13453+#endif
13454+
13455+ return copy_user_generic((__force_kernel void *)dst,
13456+ (__force_kernel const void *)src, size);
13457+ }
13458 switch (size) {
13459 case 1: {
13460 u8 tmp;
13461- __get_user_asm(tmp, (u8 __user *)src,
13462+ __get_user_asm(tmp, (const u8 __user *)src,
13463 ret, "b", "b", "=q", 1);
13464 if (likely(!ret))
13465 __put_user_asm(tmp, (u8 __user *)dst,
4c928ab7 13466@@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13467 }
13468 case 2: {
13469 u16 tmp;
13470- __get_user_asm(tmp, (u16 __user *)src,
13471+ __get_user_asm(tmp, (const u16 __user *)src,
13472 ret, "w", "w", "=r", 2);
13473 if (likely(!ret))
13474 __put_user_asm(tmp, (u16 __user *)dst,
4c928ab7 13475@@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13476
13477 case 4: {
13478 u32 tmp;
13479- __get_user_asm(tmp, (u32 __user *)src,
13480+ __get_user_asm(tmp, (const u32 __user *)src,
13481 ret, "l", "k", "=r", 4);
13482 if (likely(!ret))
13483 __put_user_asm(tmp, (u32 __user *)dst,
4c928ab7 13484@@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13485 }
13486 case 8: {
13487 u64 tmp;
13488- __get_user_asm(tmp, (u64 __user *)src,
13489+ __get_user_asm(tmp, (const u64 __user *)src,
13490 ret, "q", "", "=r", 8);
13491 if (likely(!ret))
13492 __put_user_asm(tmp, (u64 __user *)dst,
4c928ab7 13493@@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13494 return ret;
13495 }
13496 default:
13497- return copy_user_generic((__force void *)dst,
13498- (__force void *)src, size);
13499+
13500+#ifdef CONFIG_PAX_MEMORY_UDEREF
13501+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13502+ src += PAX_USER_SHADOW_BASE;
13503+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13504+ dst += PAX_USER_SHADOW_BASE;
13505+#endif
13506+
13507+ return copy_user_generic((__force_kernel void *)dst,
13508+ (__force_kernel const void *)src, size);
df50ba0c 13509 }
13510 }
13511
13512@@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
13513 __must_check long strnlen_user(const char __user *str, long n);
13514 __must_check long __strnlen_user(const char __user *str, long n);
13515 __must_check long strlen_user(const char __user *str);
13516-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13517-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13518+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13519+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
fe2de317 13520
df50ba0c 13521 static __must_check __always_inline int
fe2de317 13522-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13523+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13524+static __must_check __always_inline int
fe2de317 13525+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
df50ba0c 13526 {
6e9df6a3 13527- return copy_user_generic(dst, (__force const void *)src, size);
fe2de317 13528+ if (size > INT_MAX)
13529+ return size;
13530+
13531+#ifdef CONFIG_PAX_MEMORY_UDEREF
13532+ if (!__access_ok(VERIFY_READ, src, size))
13533+ return size;
bc901d79 13534+
13535+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13536+ src += PAX_USER_SHADOW_BASE;
13537+#endif
13538+
6e9df6a3 13539+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
ae4e228f 13540 }
13541
13542-static __must_check __always_inline int
fe2de317 13543-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
58c5fc13 13544+static __must_check __always_inline unsigned long
13545+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13546+static __must_check __always_inline unsigned long
fe2de317 13547+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
58c5fc13 13548 {
6e9df6a3 13549- return copy_user_generic((__force void *)dst, src, size);
fe2de317 13550+ if (size > INT_MAX)
13551+ return size;
13552+
13553+#ifdef CONFIG_PAX_MEMORY_UDEREF
13554+ if (!__access_ok(VERIFY_WRITE, dst, size))
13555+ return size;
bc901d79 13556+
13557+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13558+ dst += PAX_USER_SHADOW_BASE;
13559+#endif
13560+
6e9df6a3 13561+ return copy_user_generic((__force_kernel void *)dst, src, size);
13562 }
13563
13564-extern long __copy_user_nocache(void *dst, const void __user *src,
fe2de317 13565- unsigned size, int zerorest);
58c5fc13 13566+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
4c928ab7 13567+ unsigned long size, int zerorest) __size_overflow(3);
13568
13569-static inline int
13570-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
4c928ab7 13571+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
fe2de317 13572+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13573 {
13574 might_sleep();
13575+
fe2de317 13576+ if (size > INT_MAX)
58c5fc13 13577+ return size;
13578+
13579+#ifdef CONFIG_PAX_MEMORY_UDEREF
13580+ if (!__access_ok(VERIFY_READ, src, size))
13581+ return size;
13582+#endif
13583+
13584 return __copy_user_nocache(dst, src, size, 1);
13585 }
13586
13587-static inline int
13588-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 13589- unsigned size)
58c5fc13 13590+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13591+ unsigned long size) __size_overflow(3);
13592+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 13593+ unsigned long size)
58c5fc13 13594 {
fe2de317 13595+ if (size > INT_MAX)
58c5fc13 13596+ return size;
13597+
13598+#ifdef CONFIG_PAX_MEMORY_UDEREF
13599+ if (!__access_ok(VERIFY_READ, src, size))
13600+ return size;
13601+#endif
13602+
13603 return __copy_user_nocache(dst, src, size, 0);
13604 }
13605
13606-unsigned long
6e9df6a3 13607-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
58c5fc13 13608+extern unsigned long
4c928ab7 13609+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13610
13611 #endif /* _ASM_X86_UACCESS_64_H */
13612diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13613index bb05228..d763d5b 100644
13614--- a/arch/x86/include/asm/vdso.h
13615+++ b/arch/x86/include/asm/vdso.h
13616@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13617 #define VDSO32_SYMBOL(base, name) \
13618 ({ \
13619 extern const char VDSO32_##name[]; \
13620- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13621+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13622 })
13623 #endif
13624
fe2de317 13625diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
5e856224 13626index a609c39..7a68dc7 100644
13627--- a/arch/x86/include/asm/x86_init.h
13628+++ b/arch/x86/include/asm/x86_init.h
5e856224 13629@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13630 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13631 void (*find_smp_config)(void);
13632 void (*get_smp_config)(unsigned int early);
13633-};
13634+} __no_const;
57199397 13635
13636 /**
13637 * struct x86_init_resources - platform specific resource related ops
5e856224 13638@@ -43,7 +43,7 @@ struct x86_init_resources {
13639 void (*probe_roms)(void);
13640 void (*reserve_resources)(void);
13641 char *(*memory_setup)(void);
13642-};
13643+} __no_const;
58c5fc13 13644
13645 /**
13646 * struct x86_init_irqs - platform specific interrupt setup
5e856224 13647@@ -56,7 +56,7 @@ struct x86_init_irqs {
13648 void (*pre_vector_init)(void);
13649 void (*intr_init)(void);
13650 void (*trap_init)(void);
13651-};
13652+} __no_const;
58c5fc13 13653
13654 /**
13655 * struct x86_init_oem - oem platform specific customizing functions
5e856224 13656@@ -66,7 +66,7 @@ struct x86_init_irqs {
13657 struct x86_init_oem {
13658 void (*arch_setup)(void);
13659 void (*banner)(void);
13660-};
13661+} __no_const;
58c5fc13 13662
13663 /**
13664 * struct x86_init_mapping - platform specific initial kernel pagetable setup
5e856224 13665@@ -77,7 +77,7 @@ struct x86_init_oem {
13666 */
13667 struct x86_init_mapping {
13668 void (*pagetable_reserve)(u64 start, u64 end);
13669-};
13670+} __no_const;
58c5fc13 13671
13672 /**
13673 * struct x86_init_paging - platform specific paging functions
5e856224 13674@@ -87,7 +87,7 @@ struct x86_init_mapping {
13675 struct x86_init_paging {
13676 void (*pagetable_setup_start)(pgd_t *base);
13677 void (*pagetable_setup_done)(pgd_t *base);
13678-};
13679+} __no_const;
58c5fc13 13680
13681 /**
13682 * struct x86_init_timers - platform specific timer setup
5e856224 13683@@ -102,7 +102,7 @@ struct x86_init_timers {
13684 void (*tsc_pre_init)(void);
13685 void (*timer_init)(void);
13686 void (*wallclock_init)(void);
13687-};
13688+} __no_const;
58c5fc13 13689
13690 /**
13691 * struct x86_init_iommu - platform specific iommu setup
5e856224 13692@@ -110,7 +110,7 @@ struct x86_init_timers {
13693 */
13694 struct x86_init_iommu {
13695 int (*iommu_init)(void);
13696-};
13697+} __no_const;
58c5fc13 13698
13699 /**
13700 * struct x86_init_pci - platform specific pci init functions
5e856224 13701@@ -124,7 +124,7 @@ struct x86_init_pci {
13702 int (*init)(void);
13703 void (*init_irq)(void);
13704 void (*fixup_irqs)(void);
13705-};
13706+} __no_const;
58c5fc13 13707
13708 /**
13709 * struct x86_init_ops - functions for platform specific setup
5e856224 13710@@ -140,7 +140,7 @@ struct x86_init_ops {
13711 struct x86_init_timers timers;
13712 struct x86_init_iommu iommu;
13713 struct x86_init_pci pci;
13714-};
13715+} __no_const;
66a7e928 13716
13717 /**
13718 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
5e856224 13719@@ -149,7 +149,7 @@ struct x86_init_ops {
13720 struct x86_cpuinit_ops {
13721 void (*setup_percpu_clockev)(void);
5e856224 13722 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13723-};
13724+} __no_const;
66a7e928 13725
13726 /**
13727 * struct x86_platform_ops - platform specific runtime functions
5e856224 13728@@ -171,7 +171,7 @@ struct x86_platform_ops {
15a11c5b 13729 void (*nmi_init)(void);
4c928ab7 13730 unsigned char (*get_nmi_reason)(void);
13731 int (*i8042_detect)(void);
13732-};
13733+} __no_const;
13734
13735 struct pci_dev;
13736
5e856224 13737@@ -180,7 +180,7 @@ struct x86_msi_ops {
13738 void (*teardown_msi_irq)(unsigned int irq);
13739 void (*teardown_msi_irqs)(struct pci_dev *dev);
5e856224 13740 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13741-};
13742+} __no_const;
13743
13744 extern struct x86_init_ops x86_init;
13745 extern struct x86_cpuinit_ops x86_cpuinit;
13746diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13747index c6ce245..ffbdab7 100644
13748--- a/arch/x86/include/asm/xsave.h
13749+++ b/arch/x86/include/asm/xsave.h
13750@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13751 {
13752 int err;
6892158b 13753
13754+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13755+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13756+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13757+#endif
13758+
13759 /*
13760 * Clear the xsave header first, so that reserved fields are
13761 * initialized to zero.
fe2de317 13762@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13763 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13764 {
13765 int err;
13766- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13767+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13768 u32 lmask = mask;
13769 u32 hmask = mask >> 32;
13770
13771+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13772+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13773+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13774+#endif
13775+
13776 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13777 "2:\n"
13778 ".section .fixup,\"ax\"\n"
13779diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13780index 6a564ac..9b1340c 100644
13781--- a/arch/x86/kernel/acpi/realmode/Makefile
13782+++ b/arch/x86/kernel/acpi/realmode/Makefile
13783@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13784 $(call cc-option, -fno-stack-protector) \
13785 $(call cc-option, -mpreferred-stack-boundary=2)
13786 KBUILD_CFLAGS += $(call cc-option, -m32)
13787+ifdef CONSTIFY_PLUGIN
13788+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13789+endif
13790 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13791 GCOV_PROFILE := n
13792
fe2de317
MT
13793diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13794index b4fd836..4358fe3 100644
13795--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13796+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
15a11c5b
MT
13797@@ -108,6 +108,9 @@ wakeup_code:
13798 /* Do any other stuff... */
13799
13800 #ifndef CONFIG_64BIT
13801+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13802+ call verify_cpu
13803+
13804 /* This could also be done in C code... */
13805 movl pmode_cr3, %eax
13806 movl %eax, %cr3
13807@@ -131,6 +134,7 @@ wakeup_code:
13808 movl pmode_cr0, %eax
13809 movl %eax, %cr0
13810 jmp pmode_return
13811+# include "../../verify_cpu.S"
13812 #else
13813 pushw $0
13814 pushw trampoline_segment
fe2de317
MT
13815diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13816index 103b6ab..2004d0a 100644
13817--- a/arch/x86/kernel/acpi/sleep.c
13818+++ b/arch/x86/kernel/acpi/sleep.c
15a11c5b 13819@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
66a7e928 13820 header->trampoline_segment = trampoline_address() >> 4;
58c5fc13 13821 #ifdef CONFIG_SMP
16454cff 13822 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
58c5fc13 13823+
ae4e228f 13824+ pax_open_kernel();
58c5fc13
MT
13825 early_gdt_descr.address =
13826 (unsigned long)get_cpu_gdt_table(smp_processor_id());
ae4e228f 13827+ pax_close_kernel();
58c5fc13
MT
13828+
13829 initial_gs = per_cpu_offset(smp_processor_id());
13830 #endif
13831 initial_code = (unsigned long)wakeup_long64;
fe2de317
MT
13832diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13833index 13ab720..95d5442 100644
13834--- a/arch/x86/kernel/acpi/wakeup_32.S
13835+++ b/arch/x86/kernel/acpi/wakeup_32.S
58c5fc13
MT
13836@@ -30,13 +30,11 @@ wakeup_pmode_return:
13837 # and restore the stack ... but you need gdt for this to work
13838 movl saved_context_esp, %esp
13839
13840- movl %cs:saved_magic, %eax
13841- cmpl $0x12345678, %eax
13842+ cmpl $0x12345678, saved_magic
13843 jne bogus_magic
13844
13845 # jump to place where we left off
13846- movl saved_eip, %eax
13847- jmp *%eax
13848+ jmp *(saved_eip)
13849
13850 bogus_magic:
13851 jmp bogus_magic
fe2de317 13852diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
4c928ab7 13853index 1f84794..e23f862 100644
fe2de317
MT
13854--- a/arch/x86/kernel/alternative.c
13855+++ b/arch/x86/kernel/alternative.c
13856@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
6e9df6a3
MT
13857 */
13858 for (a = start; a < end; a++) {
13859 instr = (u8 *)&a->instr_offset + a->instr_offset;
13860+
13861+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13862+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13863+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13864+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13865+#endif
13866+
13867 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13868 BUG_ON(a->replacementlen > a->instrlen);
13869 BUG_ON(a->instrlen > sizeof(insnbuf));
fe2de317 13870@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
6e9df6a3
MT
13871 for (poff = start; poff < end; poff++) {
13872 u8 *ptr = (u8 *)poff + *poff;
13873
13874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13875+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13876+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13877+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13878+#endif
13879+
57199397
MT
13880 if (!*poff || ptr < text || ptr >= text_end)
13881 continue;
13882 /* turn DS segment override prefix into lock prefix */
13883- if (*ptr == 0x3e)
13884+ if (*ktla_ktva(ptr) == 0x3e)
13885 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13886 };
13887 mutex_unlock(&text_mutex);
fe2de317 13888@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
6e9df6a3
MT
13889 for (poff = start; poff < end; poff++) {
13890 u8 *ptr = (u8 *)poff + *poff;
13891
13892+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13893+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13894+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13895+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13896+#endif
13897+
57199397
MT
13898 if (!*poff || ptr < text || ptr >= text_end)
13899 continue;
13900 /* turn lock prefix into DS segment override prefix */
13901- if (*ptr == 0xf0)
13902+ if (*ktla_ktva(ptr) == 0xf0)
13903 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13904 };
13905 mutex_unlock(&text_mutex);
fe2de317 13906@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
58c5fc13
MT
13907
13908 BUG_ON(p->len > MAX_PATCH_LEN);
13909 /* prep the buffer with the original instructions */
13910- memcpy(insnbuf, p->instr, p->len);
13911+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13912 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13913 (unsigned long)p->instr, p->len);
13914
fe2de317 13915@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
df50ba0c
MT
13916 if (smp_alt_once)
13917 free_init_pages("SMP alternatives",
13918 (unsigned long)__smp_locks,
13919- (unsigned long)__smp_locks_end);
13920+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13921
13922 restart_nmi();
13923 }
fe2de317 13924@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
58c5fc13
MT
13925 * instructions. And on the local CPU you need to be protected again NMI or MCE
13926 * handlers seeing an inconsistent instruction while you patch.
13927 */
bc901d79
MT
13928-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13929+void *__kprobes text_poke_early(void *addr, const void *opcode,
ae4e228f 13930 size_t len)
58c5fc13
MT
13931 {
13932 unsigned long flags;
58c5fc13
MT
13933 local_irq_save(flags);
13934- memcpy(addr, opcode, len);
13935+
ae4e228f 13936+ pax_open_kernel();
58c5fc13 13937+ memcpy(ktla_ktva(addr), opcode, len);
57199397 13938 sync_core();
ae4e228f 13939+ pax_close_kernel();
58c5fc13 13940+
ae4e228f 13941 local_irq_restore(flags);
58c5fc13 13942 /* Could also do a CLFLUSH here to speed up CPU recovery; but
57199397 13943 that causes hangs on some VIA CPUs. */
fe2de317 13944@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
58c5fc13
MT
13945 */
13946 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13947 {
13948- unsigned long flags;
13949- char *vaddr;
13950+ unsigned char *vaddr = ktla_ktva(addr);
13951 struct page *pages[2];
13952- int i;
13953+ size_t i;
58c5fc13 13954
ae4e228f 13955 if (!core_kernel_text((unsigned long)addr)) {
58c5fc13
MT
13956- pages[0] = vmalloc_to_page(addr);
13957- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
58c5fc13
MT
13958+ pages[0] = vmalloc_to_page(vaddr);
13959+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13960 } else {
13961- pages[0] = virt_to_page(addr);
13962+ pages[0] = virt_to_page(vaddr);
13963 WARN_ON(!PageReserved(pages[0]));
13964- pages[1] = virt_to_page(addr + PAGE_SIZE);
13965+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13966 }
13967 BUG_ON(!pages[0]);
13968- local_irq_save(flags);
13969- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13970- if (pages[1])
13971- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13972- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13973- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13974- clear_fixmap(FIX_TEXT_POKE0);
13975- if (pages[1])
13976- clear_fixmap(FIX_TEXT_POKE1);
13977- local_flush_tlb();
13978- sync_core();
13979- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13980- that causes hangs on some VIA CPUs. */
13981+ text_poke_early(addr, opcode, len);
13982 for (i = 0; i < len; i++)
13983- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13984- local_irq_restore(flags);
bc901d79 13985+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
58c5fc13
MT
13986 return addr;
13987 }
df50ba0c 13988
fe2de317 13989diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
5e856224 13990index 5b3f88e..61232b4 100644
13991--- a/arch/x86/kernel/apic/apic.c
13992+++ b/arch/x86/kernel/apic/apic.c
5e856224 13993@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
66a7e928 13994 /*
13995 * Debug level, exported for io_apic.c
13996 */
13997-unsigned int apic_verbosity;
13998+int apic_verbosity;
ae4e228f 13999
15a11c5b 14000 int pic_mode;
66a7e928 14001
5e856224 14002@@ -1912,7 +1912,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14003 apic_write(APIC_ESR, 0);
14004 v1 = apic_read(APIC_ESR);
14005 ack_APIC_irq();
14006- atomic_inc(&irq_err_count);
14007+ atomic_inc_unchecked(&irq_err_count);
14008
14009 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
14010 smp_processor_id(), v0 , v1);
fe2de317 14011diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
5e856224 14012index fb07275..e06bb59 100644
14013--- a/arch/x86/kernel/apic/io_apic.c
14014+++ b/arch/x86/kernel/apic/io_apic.c
4c928ab7 14015@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14016 }
14017 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14018
14019-void lock_vector_lock(void)
14020+void lock_vector_lock(void) __acquires(vector_lock)
14021 {
14022 /* Used to the online set of cpus does not change
14023 * during assign_irq_vector.
4c928ab7 14024@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
df50ba0c 14025 raw_spin_lock(&vector_lock);
14026 }
14027
14028-void unlock_vector_lock(void)
14029+void unlock_vector_lock(void) __releases(vector_lock)
14030 {
df50ba0c 14031 raw_spin_unlock(&vector_lock);
ae4e228f 14032 }
4c928ab7 14033@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
14034 ack_APIC_irq();
14035 }
14036
14037-atomic_t irq_mis_count;
14038+atomic_unchecked_t irq_mis_count;
14039
14040 static void ack_apic_level(struct irq_data *data)
14041 {
14042@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
14043 * at the cpu.
14044 */
14045 if (!(v & (1 << (i & 0x1f)))) {
14046- atomic_inc(&irq_mis_count);
14047+ atomic_inc_unchecked(&irq_mis_count);
14048
14049 eoi_ioapic_irq(irq, cfg);
14050 }
fe2de317 14051diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
5e856224 14052index f76623c..aab694f 100644
14053--- a/arch/x86/kernel/apm_32.c
14054+++ b/arch/x86/kernel/apm_32.c
4c928ab7 14055@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
14056 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14057 * even though they are called in protected mode.
14058 */
14059-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14060+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14061 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14062
14063 static const char driver_version[] = "1.16ac"; /* no spaces */
4c928ab7 14064@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
14065 BUG_ON(cpu != 0);
14066 gdt = get_cpu_gdt_table(cpu);
14067 save_desc_40 = gdt[0x40 / 8];
14068+
ae4e228f 14069+ pax_open_kernel();
58c5fc13 14070 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 14071+ pax_close_kernel();
58c5fc13 14072
14073 apm_irq_save(flags);
14074 APM_DO_SAVE_SEGS;
4c928ab7 14075@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
14076 &call->esi);
14077 APM_DO_RESTORE_SEGS;
14078 apm_irq_restore(flags);
14079+
ae4e228f 14080+ pax_open_kernel();
58c5fc13 14081 gdt[0x40 / 8] = save_desc_40;
ae4e228f 14082+ pax_close_kernel();
14083+
14084 put_cpu();
14085
14086 return call->eax & 0xff;
4c928ab7 14087@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
14088 BUG_ON(cpu != 0);
14089 gdt = get_cpu_gdt_table(cpu);
14090 save_desc_40 = gdt[0x40 / 8];
14091+
ae4e228f 14092+ pax_open_kernel();
58c5fc13 14093 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 14094+ pax_close_kernel();
58c5fc13 14095
14096 apm_irq_save(flags);
14097 APM_DO_SAVE_SEGS;
4c928ab7 14098@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
14099 &call->eax);
14100 APM_DO_RESTORE_SEGS;
14101 apm_irq_restore(flags);
14102+
ae4e228f 14103+ pax_open_kernel();
58c5fc13 14104 gdt[0x40 / 8] = save_desc_40;
ae4e228f 14105+ pax_close_kernel();
14106+
14107 put_cpu();
14108 return error;
14109 }
4c928ab7 14110@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
14111 * code to that CPU.
14112 */
14113 gdt = get_cpu_gdt_table(0);
14114+
14115+ pax_open_kernel();
14116 set_desc_base(&gdt[APM_CS >> 3],
14117 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14118 set_desc_base(&gdt[APM_CS_16 >> 3],
14119 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14120 set_desc_base(&gdt[APM_DS >> 3],
14121 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14122+ pax_close_kernel();
58c5fc13 14123
14124 proc_create("apm", 0, NULL, &apm_file_ops);
14125
fe2de317 14126diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
5e856224 14127index 68de2dc..1f3c720 100644
14128--- a/arch/x86/kernel/asm-offsets.c
14129+++ b/arch/x86/kernel/asm-offsets.c
66a7e928 14130@@ -33,6 +33,8 @@ void common(void) {
71d190be 14131 OFFSET(TI_status, thread_info, status);
14132 OFFSET(TI_addr_limit, thread_info, addr_limit);
14133 OFFSET(TI_preempt_count, thread_info, preempt_count);
14134+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14135+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
58c5fc13 14136
14137 BLANK();
14138 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15a11c5b 14139@@ -53,8 +55,26 @@ void common(void) {
ae4e228f 14140 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
66a7e928 14141 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14142 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14143+
14144+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 14145+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14146 #endif
14147
14148+#ifdef CONFIG_PAX_MEMORY_UDEREF
14149+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14150+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14151+#ifdef CONFIG_X86_64
14152+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14153+#endif
14154+#endif
14155+
14156+#endif
14157+
14158+ BLANK();
14159+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14160+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
8308f9c9 14161+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
66a7e928 14162+
14163 #ifdef CONFIG_XEN
14164 BLANK();
14165 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
fe2de317 14166diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
5e856224 14167index 834e897..dacddc8 100644
14168--- a/arch/x86/kernel/asm-offsets_64.c
14169+++ b/arch/x86/kernel/asm-offsets_64.c
5e856224 14170@@ -70,6 +70,7 @@ int main(void)
14171 BLANK();
14172 #undef ENTRY
14173
14174+ DEFINE(TSS_size, sizeof(struct tss_struct));
14175 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14176 BLANK();
14177
14178diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
4c928ab7 14179index 25f24dc..4094a7f 100644
14180--- a/arch/x86/kernel/cpu/Makefile
14181+++ b/arch/x86/kernel/cpu/Makefile
14182@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14183 CFLAGS_REMOVE_perf_event.o = -pg
14184 endif
14185
14186-# Make sure load_percpu_segment has no stackprotector
14187-nostackp := $(call cc-option, -fno-stack-protector)
14188-CFLAGS_common.o := $(nostackp)
14189-
14190 obj-y := intel_cacheinfo.o scattered.o topology.o
14191 obj-y += proc.o capflags.o powerflags.o common.o
14192 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14193diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
5e856224 14194index 80ab83d..0a7b34e 100644
14195--- a/arch/x86/kernel/cpu/amd.c
14196+++ b/arch/x86/kernel/cpu/amd.c
5e856224 14197@@ -670,7 +670,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14198 unsigned int size)
14199 {
14200 /* AMD errata T13 (order #21922) */
14201- if ((c->x86 == 6)) {
14202+ if (c->x86 == 6) {
14203 /* Duron Rev A0 */
14204 if (c->x86_model == 3 && c->x86_mask == 0)
14205 size = 64;
fe2de317 14206diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
5e856224 14207index 1a810e4..9fa8201 100644
14208--- a/arch/x86/kernel/cpu/common.c
14209+++ b/arch/x86/kernel/cpu/common.c
4c928ab7 14210@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14211
14212 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14213
14214-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14215-#ifdef CONFIG_X86_64
14216- /*
14217- * We need valid kernel segments for data and code in long mode too
14218- * IRET will check the segment types kkeil 2000/10/28
14219- * Also sysret mandates a special GDT layout
14220- *
14221- * TLS descriptors are currently at a different place compared to i386.
14222- * Hopefully nobody expects them at a fixed place (Wine?)
14223- */
14224- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14225- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14226- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14227- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14228- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14229- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
58c5fc13 14230-#else
14231- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14232- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14233- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14234- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14235- /*
14236- * Segments used for calling PnP BIOS have byte granularity.
14237- * They code segments and data segments have fixed 64k limits,
14238- * the transfer segment sizes are set at run time.
14239- */
14240- /* 32-bit code */
ae4e228f 14241- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 14242- /* 16-bit code */
ae4e228f 14243- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 14244- /* 16-bit data */
ae4e228f 14245- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
58c5fc13 14246- /* 16-bit data */
ae4e228f 14247- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
58c5fc13 14248- /* 16-bit data */
ae4e228f 14249- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14250- /*
14251- * The APM segments have byte granularity and their bases
14252- * are set at run time. All have 64k limits.
14253- */
14254- /* 32-bit code */
ae4e228f 14255- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 14256- /* 16-bit code */
ae4e228f 14257- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 14258- /* data */
ae4e228f 14259- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
58c5fc13 14260-
14261- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14262- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14263- GDT_STACK_CANARY_INIT
14264-#endif
14265-} };
14266-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14267-
14268 static int __init x86_xsave_setup(char *s)
14269 {
14270 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
4c928ab7 14271@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
14272 {
14273 struct desc_ptr gdt_descr;
14274
14275- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14276+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14277 gdt_descr.size = GDT_SIZE - 1;
14278 load_gdt(&gdt_descr);
14279 /* Reload the per-cpu base */
5e856224 14280@@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14281 /* Filter out anything that depends on CPUID levels we don't have */
14282 filter_cpuid_features(c, true);
14283
4c928ab7 14284+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14285+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14286+#endif
14287+
14288 /* If the model name is still unset, do table lookup. */
14289 if (!c->x86_model_id[0]) {
14290 const char *p;
5e856224 14291@@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
14292 }
14293 __setup("clearcpuid=", setup_disablecpuid);
14294
14295+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14296+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14297+
14298 #ifdef CONFIG_X86_64
14299 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14300-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14301- (unsigned long) nmi_idt_table };
14302+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
71d190be 14303
14304 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14305 irq_stack_union) __aligned(PAGE_SIZE);
14306@@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14307 EXPORT_PER_CPU_SYMBOL(current_task);
14308
14309 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14310- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 14311+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14312 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14313
14314 DEFINE_PER_CPU(char *, irq_stack_ptr) =
5e856224 14315@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14316 {
14317 memset(regs, 0, sizeof(struct pt_regs));
14318 regs->fs = __KERNEL_PERCPU;
14319- regs->gs = __KERNEL_STACK_CANARY;
14320+ savesegment(gs, regs->gs);
14321
14322 return regs;
14323 }
5e856224 14324@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14325 int i;
14326
14327 cpu = stack_smp_processor_id();
14328- t = &per_cpu(init_tss, cpu);
14329+ t = init_tss + cpu;
ae4e228f 14330 oist = &per_cpu(orig_ist, cpu);
14331
14332 #ifdef CONFIG_NUMA
5e856224 14333@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14334 switch_to_new_gdt(cpu);
14335 loadsegment(fs, 0);
14336
14337- load_idt((const struct desc_ptr *)&idt_descr);
14338+ load_idt(&idt_descr);
14339
14340 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14341 syscall_init();
5e856224 14342@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14343 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14344 barrier();
14345
14346- x86_configure_nx();
14347 if (cpu != 0)
14348 enable_x2apic();
14349
5e856224 14350@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14351 {
14352 int cpu = smp_processor_id();
14353 struct task_struct *curr = current;
14354- struct tss_struct *t = &per_cpu(init_tss, cpu);
14355+ struct tss_struct *t = init_tss + cpu;
14356 struct thread_struct *thread = &curr->thread;
14357
14358 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
fe2de317 14359diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
5e856224 14360index 3e6ff6c..54b4992 100644
14361--- a/arch/x86/kernel/cpu/intel.c
14362+++ b/arch/x86/kernel/cpu/intel.c
4c928ab7 14363@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14364 * Update the IDT descriptor and reload the IDT so that
14365 * it uses the read-only mapped virtual address.
14366 */
14367- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14368+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14369 load_idt(&idt_descr);
14370 }
14371 #endif
fe2de317 14372diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
5e856224 14373index fc4beb3..f20a5a7 100644
14374--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14375+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
5e856224 14376@@ -199,6 +199,8 @@ static void raise_mce(struct mce *m)
14377
14378 /* Error injection interface */
14379 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14380+ size_t usize, loff_t *off) __size_overflow(3);
14381+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14382 size_t usize, loff_t *off)
14383 {
14384 struct mce m;
fe2de317 14385diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
5e856224 14386index 5a11ae2..a1a1c8a 100644
14387--- a/arch/x86/kernel/cpu/mcheck/mce.c
14388+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14389@@ -42,6 +42,7 @@
14390 #include <asm/processor.h>
14391 #include <asm/mce.h>
14392 #include <asm/msr.h>
14393+#include <asm/local.h>
14394
14395 #include "mce-internal.h"
14396
5e856224 14397@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14398 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14399 m->cs, m->ip);
14400
14401- if (m->cs == __KERNEL_CS)
14402+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14403 print_symbol("{%s}", m->ip);
14404 pr_cont("\n");
14405 }
5e856224 14406@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14407
14408 #define PANIC_TIMEOUT 5 /* 5 seconds */
14409
14410-static atomic_t mce_paniced;
14411+static atomic_unchecked_t mce_paniced;
14412
14413 static int fake_panic;
14414-static atomic_t mce_fake_paniced;
14415+static atomic_unchecked_t mce_fake_paniced;
14416
14417 /* Panic in progress. Enable interrupts and wait for final IPI */
14418 static void wait_for_panic(void)
5e856224 14419@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14420 /*
14421 * Make sure only one CPU runs in machine check panic
14422 */
14423- if (atomic_inc_return(&mce_paniced) > 1)
14424+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14425 wait_for_panic();
14426 barrier();
14427
5e856224 14428@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14429 console_verbose();
14430 } else {
14431 /* Don't log too much for fake panic */
14432- if (atomic_inc_return(&mce_fake_paniced) > 1)
14433+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14434 return;
14435 }
14436 /* First print corrected ones that are still unlogged */
5e856224 14437@@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
14438 * might have been modified by someone else.
14439 */
14440 rmb();
14441- if (atomic_read(&mce_paniced))
14442+ if (atomic_read_unchecked(&mce_paniced))
14443 wait_for_panic();
14444 if (!monarch_timeout)
14445 goto out;
5e856224 14446@@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14447 }
14448
14449 /* Call the installed machine check handler for this CPU setup. */
14450-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14451+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14452 unexpected_machine_check;
14453
14454 /*
5e856224 14455@@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14456 return;
14457 }
14458
14459+ pax_open_kernel();
14460 machine_check_vector = do_machine_check;
14461+ pax_close_kernel();
14462
14463 __mcheck_cpu_init_generic();
14464 __mcheck_cpu_init_vendor(c);
5e856224 14465@@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14466 */
14467
14468 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14469-static int mce_chrdev_open_count; /* #times opened */
14470+static local_t mce_chrdev_open_count; /* #times opened */
14471 static int mce_chrdev_open_exclu; /* already open exclusive? */
58c5fc13 14472
6e9df6a3 14473 static int mce_chrdev_open(struct inode *inode, struct file *file)
5e856224 14474@@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
6e9df6a3 14475 spin_lock(&mce_chrdev_state_lock);
58c5fc13 14476
14477 if (mce_chrdev_open_exclu ||
14478- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14479+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14480 spin_unlock(&mce_chrdev_state_lock);
14481
14482 return -EBUSY;
5e856224 14483@@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14484
14485 if (file->f_flags & O_EXCL)
14486 mce_chrdev_open_exclu = 1;
14487- mce_chrdev_open_count++;
14488+ local_inc(&mce_chrdev_open_count);
58c5fc13 14489
6e9df6a3 14490 spin_unlock(&mce_chrdev_state_lock);
58c5fc13 14491
5e856224 14492@@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
58c5fc13 14493 {
6e9df6a3 14494 spin_lock(&mce_chrdev_state_lock);
58c5fc13 14495
14496- mce_chrdev_open_count--;
14497+ local_dec(&mce_chrdev_open_count);
14498 mce_chrdev_open_exclu = 0;
58c5fc13 14499
6e9df6a3 14500 spin_unlock(&mce_chrdev_state_lock);
5e856224 14501@@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
14502 static void mce_reset(void)
14503 {
14504 cpu_missing = 0;
14505- atomic_set(&mce_fake_paniced, 0);
14506+ atomic_set_unchecked(&mce_fake_paniced, 0);
14507 atomic_set(&mce_executing, 0);
14508 atomic_set(&mce_callin, 0);
14509 atomic_set(&global_nwo, 0);
fe2de317 14510diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
4c928ab7 14511index 5c0e653..0882b0a 100644
14512--- a/arch/x86/kernel/cpu/mcheck/p5.c
14513+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14514@@ -12,6 +12,7 @@
14515 #include <asm/system.h>
14516 #include <asm/mce.h>
14517 #include <asm/msr.h>
14518+#include <asm/pgtable.h>
14519
14520 /* By default disabled */
14521 int mce_p5_enabled __read_mostly;
14522@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14523 if (!cpu_has(c, X86_FEATURE_MCE))
14524 return;
14525
15a11c5b 14526+ pax_open_kernel();
fe2de317 14527 machine_check_vector = pentium_machine_check;
15a11c5b 14528+ pax_close_kernel();
14529 /* Make sure the vector pointer is visible before we enable MCEs: */
14530 wmb();
14531
14532diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
4c928ab7 14533index 54060f5..c1a7577 100644
14534--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14535+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14536@@ -11,6 +11,7 @@
14537 #include <asm/system.h>
14538 #include <asm/mce.h>
14539 #include <asm/msr.h>
14540+#include <asm/pgtable.h>
14541
14542 /* Machine check handler for WinChip C6: */
14543 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14544@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14545 {
14546 u32 lo, hi;
14547
14548+ pax_open_kernel();
14549 machine_check_vector = winchip_machine_check;
14550+ pax_close_kernel();
14551 /* Make sure the vector pointer is visible before we enable MCEs: */
14552 wmb();
14553
14554diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
14555index 7928963..1b16001 100644
14556--- a/arch/x86/kernel/cpu/mtrr/if.c
14557+++ b/arch/x86/kernel/cpu/mtrr/if.c
14558@@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
14559 * "base=%Lx size=%Lx type=%s" or "disable=%d"
14560 */
14561 static ssize_t
14562+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
14563+static ssize_t
14564 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
14565 {
14566 int i, err;
14567diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14568index 6b96110..0da73eb 100644
14569--- a/arch/x86/kernel/cpu/mtrr/main.c
14570+++ b/arch/x86/kernel/cpu/mtrr/main.c
66a7e928 14571@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14572 u64 size_or_mask, size_and_mask;
14573 static bool mtrr_aps_delayed_init;
14574
df50ba0c 14575-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14576+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14577
df50ba0c 14578 const struct mtrr_ops *mtrr_if;
ae4e228f 14579
14580diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14581index df5e41f..816c719 100644
14582--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14583+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14584@@ -25,7 +25,7 @@ struct mtrr_ops {
14585 int (*validate_add_page)(unsigned long base, unsigned long size,
ae4e228f 14586 unsigned int type);
14587 int (*have_wrcomb)(void);
14588-};
14589+} __do_const;
14590
14591 extern int generic_get_free_region(unsigned long base, unsigned long size,
15a11c5b 14592 int replace_reg);
fe2de317 14593diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
5e856224 14594index 5adce10..99284ec 100644
14595--- a/arch/x86/kernel/cpu/perf_event.c
14596+++ b/arch/x86/kernel/cpu/perf_event.c
5e856224 14597@@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14598 break;
14599
bc901d79 14600 perf_callchain_store(entry, frame.return_address);
57199397 14601- fp = frame.next_frame;
6e9df6a3 14602+ fp = (const void __force_user *)frame.next_frame;
14603 }
14604 }
14605
fe2de317 14606diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
4c928ab7 14607index 13ad899..f642b9a 100644
14608--- a/arch/x86/kernel/crash.c
14609+++ b/arch/x86/kernel/crash.c
14610@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14611 {
58c5fc13 14612 #ifdef CONFIG_X86_32
14613 struct pt_regs fixed_regs;
14614-#endif
14615
14616-#ifdef CONFIG_X86_32
14617- if (!user_mode_vm(regs)) {
14618+ if (!user_mode(regs)) {
14619 crash_fixup_ss_esp(&fixed_regs, regs);
14620 regs = &fixed_regs;
14621 }
14622diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14623index 37250fe..bf2ec74 100644
14624--- a/arch/x86/kernel/doublefault_32.c
14625+++ b/arch/x86/kernel/doublefault_32.c
14626@@ -11,7 +11,7 @@
14627
14628 #define DOUBLEFAULT_STACKSIZE (1024)
14629 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14630-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14631+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14632
14633 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14634
14635@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14636 unsigned long gdt, tss;
14637
14638 store_gdt(&gdt_desc);
14639- gdt = gdt_desc.address;
14640+ gdt = (unsigned long)gdt_desc.address;
14641
14642 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14643
fe2de317 14644@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14645 /* 0x2 bit is always set */
14646 .flags = X86_EFLAGS_SF | 0x2,
14647 .sp = STACK_START,
14648- .es = __USER_DS,
14649+ .es = __KERNEL_DS,
14650 .cs = __KERNEL_CS,
14651 .ss = __KERNEL_DS,
14652- .ds = __USER_DS,
14653+ .ds = __KERNEL_DS,
14654 .fs = __KERNEL_PERCPU,
14655
14656 .__cr3 = __pa_nodebug(swapper_pg_dir),
fe2de317 14657diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
5e856224 14658index 4025fe4..d8451c6 100644
14659--- a/arch/x86/kernel/dumpstack.c
14660+++ b/arch/x86/kernel/dumpstack.c
14661@@ -2,6 +2,9 @@
14662 * Copyright (C) 1991, 1992 Linus Torvalds
14663 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14664 */
14665+#ifdef CONFIG_GRKERNSEC_HIDESYM
14666+#define __INCLUDED_BY_HIDESYM 1
14667+#endif
14668 #include <linux/kallsyms.h>
14669 #include <linux/kprobes.h>
14670 #include <linux/uaccess.h>
fe2de317 14671@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14672 static void
14673 print_ftrace_graph_addr(unsigned long addr, void *data,
14674 const struct stacktrace_ops *ops,
14675- struct thread_info *tinfo, int *graph)
14676+ struct task_struct *task, int *graph)
14677 {
14678- struct task_struct *task = tinfo->task;
14679 unsigned long ret_addr;
14680 int index = task->curr_ret_stack;
14681
fe2de317 14682@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14683 static inline void
14684 print_ftrace_graph_addr(unsigned long addr, void *data,
14685 const struct stacktrace_ops *ops,
14686- struct thread_info *tinfo, int *graph)
14687+ struct task_struct *task, int *graph)
14688 { }
14689 #endif
14690
fe2de317 14691@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14692 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14693 */
14694
14695-static inline int valid_stack_ptr(struct thread_info *tinfo,
14696- void *p, unsigned int size, void *end)
14697+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14698 {
14699- void *t = tinfo;
14700 if (end) {
14701 if (p < end && p >= (end-THREAD_SIZE))
14702 return 1;
fe2de317 14703@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14704 }
14705
14706 unsigned long
14707-print_context_stack(struct thread_info *tinfo,
14708+print_context_stack(struct task_struct *task, void *stack_start,
14709 unsigned long *stack, unsigned long bp,
14710 const struct stacktrace_ops *ops, void *data,
14711 unsigned long *end, int *graph)
14712 {
14713 struct stack_frame *frame = (struct stack_frame *)bp;
14714
14715- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14716+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14717 unsigned long addr;
14718
14719 addr = *stack;
fe2de317 14720@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14721 } else {
14722 ops->address(data, addr, 0);
14723 }
14724- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14725+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14726 }
14727 stack++;
14728 }
fe2de317 14729@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14730 EXPORT_SYMBOL_GPL(print_context_stack);
14731
14732 unsigned long
14733-print_context_stack_bp(struct thread_info *tinfo,
14734+print_context_stack_bp(struct task_struct *task, void *stack_start,
14735 unsigned long *stack, unsigned long bp,
14736 const struct stacktrace_ops *ops, void *data,
14737 unsigned long *end, int *graph)
fe2de317 14738@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14739 struct stack_frame *frame = (struct stack_frame *)bp;
14740 unsigned long *ret_addr = &frame->return_address;
14741
14742- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14743+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14744 unsigned long addr = *ret_addr;
14745
14746 if (!__kernel_text_address(addr))
fe2de317 14747@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14748 ops->address(data, addr, 1);
14749 frame = frame->next_frame;
14750 ret_addr = &frame->return_address;
14751- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14752+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14753 }
14754
14755 return (unsigned long)frame;
15a11c5b 14756@@ -186,7 +186,7 @@ void dump_stack(void)
57199397 14757
66a7e928 14758 bp = stack_frame(current, NULL);
14759 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14760- current->pid, current->comm, print_tainted(),
14761+ task_pid_nr(current), current->comm, print_tainted(),
14762 init_utsname()->release,
14763 (int)strcspn(init_utsname()->version, " "),
14764 init_utsname()->version);
15a11c5b 14765@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14766 }
14767 EXPORT_SYMBOL_GPL(oops_begin);
14768
14769+extern void gr_handle_kernel_exploit(void);
14770+
14771 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14772 {
14773 if (regs && kexec_should_crash(current))
fe2de317 14774@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14775 panic("Fatal exception in interrupt");
14776 if (panic_on_oops)
14777 panic("Fatal exception");
14778- do_exit(signr);
14779+
14780+ gr_handle_kernel_exploit();
14781+
14782+ do_group_exit(signr);
14783 }
14784
14785 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
5e856224 14786@@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14787
14788 show_registers(regs);
14789 #ifdef CONFIG_X86_32
14790- if (user_mode_vm(regs)) {
14791+ if (user_mode(regs)) {
14792 sp = regs->sp;
14793 ss = regs->ss & 0xffff;
14794 } else {
5e856224 14795@@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14796 unsigned long flags = oops_begin();
14797 int sig = SIGSEGV;
14798
14799- if (!user_mode_vm(regs))
14800+ if (!user_mode(regs))
14801 report_bug(regs->ip, regs);
14802
14803 if (__die(str, regs, err))
fe2de317 14804diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
4c928ab7 14805index c99f9ed..2a15d80 100644
14806--- a/arch/x86/kernel/dumpstack_32.c
14807+++ b/arch/x86/kernel/dumpstack_32.c
14808@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14809 bp = stack_frame(task, regs);
14810
14811 for (;;) {
14812- struct thread_info *context;
14813+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14814
14815- context = (struct thread_info *)
14816- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14817- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14818+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14819
14820- stack = (unsigned long *)context->previous_esp;
14821- if (!stack)
14822+ if (stack_start == task_stack_page(task))
14823 break;
14824+ stack = *(unsigned long **)stack_start;
14825 if (ops->stack(data, "IRQ") < 0)
14826 break;
14827 touch_nmi_watchdog();
14828@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14829 * When in-kernel, we also print out the stack and code at the
14830 * time of the fault..
14831 */
14832- if (!user_mode_vm(regs)) {
14833+ if (!user_mode(regs)) {
14834 unsigned int code_prologue = code_bytes * 43 / 64;
14835 unsigned int code_len = code_bytes;
14836 unsigned char c;
14837 u8 *ip;
14838+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14839
14840 printk(KERN_EMERG "Stack:\n");
14841 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14842
14843 printk(KERN_EMERG "Code: ");
14844
14845- ip = (u8 *)regs->ip - code_prologue;
14846+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14847 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14848 /* try starting at IP */
14849- ip = (u8 *)regs->ip;
14850+ ip = (u8 *)regs->ip + cs_base;
14851 code_len = code_len - code_prologue + 1;
14852 }
14853 for (i = 0; i < code_len; i++, ip++) {
14854@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
4c928ab7 14855 printk(KERN_CONT " Bad EIP value.");
14856 break;
14857 }
14858- if (ip == (u8 *)regs->ip)
14859+ if (ip == (u8 *)regs->ip + cs_base)
4c928ab7 14860 printk(KERN_CONT "<%02x> ", c);
fe2de317 14861 else
4c928ab7 14862 printk(KERN_CONT "%02x ", c);
14863@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14864 {
14865 unsigned short ud2;
14866
14867+ ip = ktla_ktva(ip);
14868 if (ip < PAGE_OFFSET)
14869 return 0;
14870 if (probe_kernel_address((unsigned short *)ip, ud2))
14871@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14872
14873 return ud2 == 0x0b0f;
14874 }
14875+
14876+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14877+void pax_check_alloca(unsigned long size)
14878+{
14879+ unsigned long sp = (unsigned long)&sp, stack_left;
14880+
14881+ /* all kernel stacks are of the same size */
14882+ stack_left = sp & (THREAD_SIZE - 1);
14883+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14884+}
14885+EXPORT_SYMBOL(pax_check_alloca);
14886+#endif
14887diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
5e856224 14888index 17107bd..b2deecf 100644
14889--- a/arch/x86/kernel/dumpstack_64.c
14890+++ b/arch/x86/kernel/dumpstack_64.c
14891@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14892 unsigned long *irq_stack_end =
14893 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14894 unsigned used = 0;
14895- struct thread_info *tinfo;
14896 int graph = 0;
14897 unsigned long dummy;
14898+ void *stack_start;
14899
14900 if (!task)
14901 task = current;
14902@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14903 * current stack address. If the stacks consist of nested
14904 * exceptions
14905 */
14906- tinfo = task_thread_info(task);
14907 for (;;) {
14908 char *id;
14909 unsigned long *estack_end;
14910+
14911 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14912 &used, &id);
14913
14914@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14915 if (ops->stack(data, id) < 0)
14916 break;
14917
14918- bp = ops->walk_stack(tinfo, stack, bp, ops,
14919+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14920 data, estack_end, &graph);
14921 ops->stack(data, "<EOE>");
14922 /*
14923@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14924 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14925 if (ops->stack(data, "IRQ") < 0)
14926 break;
14927- bp = ops->walk_stack(tinfo, stack, bp,
14928+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14929 ops, data, irq_stack_end, &graph);
14930 /*
14931 * We link to the next stack (which would be
14932@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14933 /*
14934 * This handles the process stack:
14935 */
14936- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14937+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14938+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14939 put_cpu();
14940 }
14941 EXPORT_SYMBOL(dump_trace);
14942@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14943
14944 return ud2 == 0x0b0f;
14945 }
14946+
14947+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14948+void pax_check_alloca(unsigned long size)
14949+{
14950+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14951+ unsigned cpu, used;
14952+ char *id;
14953+
14954+ /* check the process stack first */
14955+ stack_start = (unsigned long)task_stack_page(current);
14956+ stack_end = stack_start + THREAD_SIZE;
14957+ if (likely(stack_start <= sp && sp < stack_end)) {
14958+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14959+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14960+ return;
14961+ }
14962+
14963+ cpu = get_cpu();
14964+
14965+ /* check the irq stacks */
14966+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14967+ stack_start = stack_end - IRQ_STACK_SIZE;
14968+ if (stack_start <= sp && sp < stack_end) {
14969+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14970+ put_cpu();
14971+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14972+ return;
14973+ }
14974+
14975+ /* check the exception stacks */
14976+ used = 0;
14977+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14978+ stack_start = stack_end - EXCEPTION_STKSZ;
14979+ if (stack_end && stack_start <= sp && sp < stack_end) {
14980+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14981+ put_cpu();
14982+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14983+ return;
14984+ }
14985+
14986+ put_cpu();
14987+
14988+ /* unknown stack */
14989+ BUG();
14990+}
14991+EXPORT_SYMBOL(pax_check_alloca);
14992+#endif
14993diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
5e856224 14994index 9b9f18b..9fcaa04 100644
14995--- a/arch/x86/kernel/early_printk.c
14996+++ b/arch/x86/kernel/early_printk.c
14997@@ -7,6 +7,7 @@
14998 #include <linux/pci_regs.h>
14999 #include <linux/pci_ids.h>
15000 #include <linux/errno.h>
15001+#include <linux/sched.h>
15002 #include <asm/io.h>
15003 #include <asm/processor.h>
15004 #include <asm/fcntl.h>
fe2de317 15005diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
5e856224 15006index 7b784f4..db6b628 100644
15007--- a/arch/x86/kernel/entry_32.S
15008+++ b/arch/x86/kernel/entry_32.S
5e856224 15009@@ -179,13 +179,146 @@
15010 /*CFI_REL_OFFSET gs, PT_GS*/
15011 .endm
15012 .macro SET_KERNEL_GS reg
58c5fc13 15013+
15014+#ifdef CONFIG_CC_STACKPROTECTOR
15015 movl $(__KERNEL_STACK_CANARY), \reg
15016+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15017+ movl $(__USER_DS), \reg
15018+#else
15019+ xorl \reg, \reg
15020+#endif
15021+
15022 movl \reg, %gs
15023 .endm
15024
15025 #endif /* CONFIG_X86_32_LAZY_GS */
15026
15027-.macro SAVE_ALL
8308f9c9 15028+.macro pax_enter_kernel
df50ba0c 15029+#ifdef CONFIG_PAX_KERNEXEC
8308f9c9 15030+ call pax_enter_kernel
df50ba0c 15031+#endif
15032+.endm
15033+
15034+.macro pax_exit_kernel
15035+#ifdef CONFIG_PAX_KERNEXEC
15036+ call pax_exit_kernel
15037+#endif
15038+.endm
15039+
df50ba0c 15040+#ifdef CONFIG_PAX_KERNEXEC
8308f9c9 15041+ENTRY(pax_enter_kernel)
df50ba0c 15042+#ifdef CONFIG_PARAVIRT
15043+ pushl %eax
15044+ pushl %ecx
15045+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15046+ mov %eax, %esi
15047+#else
15048+ mov %cr0, %esi
15049+#endif
15050+ bts $16, %esi
15051+ jnc 1f
15052+ mov %cs, %esi
15053+ cmp $__KERNEL_CS, %esi
15054+ jz 3f
15055+ ljmp $__KERNEL_CS, $3f
15056+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15057+2:
15058+#ifdef CONFIG_PARAVIRT
15059+ mov %esi, %eax
15060+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15061+#else
15062+ mov %esi, %cr0
15063+#endif
15064+3:
15065+#ifdef CONFIG_PARAVIRT
15066+ popl %ecx
15067+ popl %eax
df50ba0c 15068+#endif
15069+ ret
15070+ENDPROC(pax_enter_kernel)
15071+
15072+ENTRY(pax_exit_kernel)
15073+#ifdef CONFIG_PARAVIRT
15074+ pushl %eax
15075+ pushl %ecx
15076+#endif
15077+ mov %cs, %esi
15078+ cmp $__KERNEXEC_KERNEL_CS, %esi
15079+ jnz 2f
15080+#ifdef CONFIG_PARAVIRT
15081+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15082+ mov %eax, %esi
15083+#else
15084+ mov %cr0, %esi
15085+#endif
15086+ btr $16, %esi
15087+ ljmp $__KERNEL_CS, $1f
15088+1:
15089+#ifdef CONFIG_PARAVIRT
15090+ mov %esi, %eax
15091+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15092+#else
15093+ mov %esi, %cr0
15094+#endif
15095+2:
15096+#ifdef CONFIG_PARAVIRT
15097+ popl %ecx
15098+ popl %eax
8308f9c9
MT
15099+#endif
15100+ ret
15101+ENDPROC(pax_exit_kernel)
15102+#endif
15103+
15104+.macro pax_erase_kstack
15105+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15106+ call pax_erase_kstack
15107+#endif
15108+.endm
15109+
8308f9c9 15110+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15111+/*
15112+ * ebp: thread_info
15113+ * ecx, edx: can be clobbered
15114+ */
8308f9c9 15115+ENTRY(pax_erase_kstack)
15116+ pushl %edi
15117+ pushl %eax
8308f9c9 15118+
66a7e928 15119+ mov TI_lowest_stack(%ebp), %edi
15120+ mov $-0xBEEF, %eax
15121+ std
15122+
15123+1: mov %edi, %ecx
15124+ and $THREAD_SIZE_asm - 1, %ecx
15125+ shr $2, %ecx
15126+ repne scasl
15127+ jecxz 2f
15128+
15129+ cmp $2*16, %ecx
8308f9c9 15130+ jc 2f
15131+
15132+ mov $2*16, %ecx
15133+ repe scasl
15134+ jecxz 2f
15135+ jne 1b
15136+
15137+2: cld
15138+ mov %esp, %ecx
15139+ sub %edi, %ecx
15140+ shr $2, %ecx
15141+ rep stosl
15142+
15143+ mov TI_task_thread_sp0(%ebp), %edi
15144+ sub $128, %edi
15145+ mov %edi, TI_lowest_stack(%ebp)
15146+
15147+ popl %eax
15148+ popl %edi
15149+ ret
15150+ENDPROC(pax_erase_kstack)
15151+#endif
15152+
15153+.macro __SAVE_ALL _DS
15154 cld
15155 PUSH_GS
bc901d79 15156 pushl_cfi %fs
5e856224 15157@@ -208,7 +341,7 @@
15158 CFI_REL_OFFSET ecx, 0
15159 pushl_cfi %ebx
15160 CFI_REL_OFFSET ebx, 0
15161- movl $(__USER_DS), %edx
15162+ movl $\_DS, %edx
15163 movl %edx, %ds
15164 movl %edx, %es
15165 movl $(__KERNEL_PERCPU), %edx
5e856224 15166@@ -216,6 +349,15 @@
15167 SET_KERNEL_GS %edx
15168 .endm
15169
15170+.macro SAVE_ALL
ae4e228f 15171+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
58c5fc13 15172+ __SAVE_ALL __KERNEL_DS
8308f9c9 15173+ pax_enter_kernel
58c5fc13
MT
15174+#else
15175+ __SAVE_ALL __USER_DS
15176+#endif
15177+.endm
15178+
15179 .macro RESTORE_INT_REGS
bc901d79
MT
15180 popl_cfi %ebx
15181 CFI_RESTORE ebx
5e856224 15182@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
6e9df6a3
MT
15183 popfl_cfi
15184 jmp syscall_exit
15185 CFI_ENDPROC
15186-END(ret_from_fork)
15187+ENDPROC(ret_from_fork)
15188
15189 /*
15190 * Interrupt exit functions should be protected against kprobes
5e856224
MT
15191@@ -335,7 +477,15 @@ resume_userspace_sig:
15192 andl $SEGMENT_RPL_MASK, %eax
15193 #endif
58c5fc13
MT
15194 cmpl $USER_RPL, %eax
15195+
15196+#ifdef CONFIG_PAX_KERNEXEC
15197+ jae resume_userspace
ae4e228f 15198+
5e856224 15199+ pax_exit_kernel
15200+ jmp resume_kernel
15201+#else
15202 jb resume_kernel # not returning to v8086 or userspace
15203+#endif
15204
15205 ENTRY(resume_userspace)
15206 LOCKDEP_SYS_EXIT
5e856224 15207@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15208 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15209 # int/exception return?
15210 jne work_pending
15211- jmp restore_all
6e9df6a3 15212-END(ret_from_exception)
66a7e928 15213+ jmp restore_all_pax
6e9df6a3 15214+ENDPROC(ret_from_exception)
66a7e928
MT
15215
15216 #ifdef CONFIG_PREEMPT
6e9df6a3 15217 ENTRY(resume_kernel)
5e856224 15218@@ -363,7 +513,7 @@ need_resched:
15219 jz restore_all
15220 call preempt_schedule_irq
15221 jmp need_resched
15222-END(resume_kernel)
15223+ENDPROC(resume_kernel)
15224 #endif
15225 CFI_ENDPROC
15226 /*
5e856224 15227@@ -397,23 +547,34 @@ sysenter_past_esp:
15228 /*CFI_REL_OFFSET cs, 0*/
15229 /*
15230 * Push current_thread_info()->sysenter_return to the stack.
15231- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15232- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15233 */
66a7e928 15234- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
71d190be 15235+ pushl_cfi $0
15236 CFI_REL_OFFSET eip, 0
15237
bc901d79 15238 pushl_cfi %eax
15239 SAVE_ALL
15240+ GET_THREAD_INFO(%ebp)
15241+ movl TI_sysenter_return(%ebp),%ebp
15242+ movl %ebp,PT_EIP(%esp)
15243 ENABLE_INTERRUPTS(CLBR_NONE)
15244
15245 /*
15246 * Load the potential sixth argument from user stack.
15247 * Careful about security.
15248 */
15249+ movl PT_OLDESP(%esp),%ebp
15250+
15251+#ifdef CONFIG_PAX_MEMORY_UDEREF
15252+ mov PT_OLDSS(%esp),%ds
15253+1: movl %ds:(%ebp),%ebp
15254+ push %ss
15255+ pop %ds
15256+#else
15257 cmpl $__PAGE_OFFSET-3,%ebp
15258 jae syscall_fault
15259 1: movl (%ebp),%ebp
15260+#endif
15261+
15262 movl %ebp,PT_EBP(%esp)
15263 .section __ex_table,"a"
15264 .align 4
5e856224 15265@@ -436,12 +597,24 @@ sysenter_do_call:
15266 testl $_TIF_ALLWORK_MASK, %ecx
15267 jne sysexit_audit
15268 sysenter_exit:
15269+
15270+#ifdef CONFIG_PAX_RANDKSTACK
8308f9c9 15271+ pushl_cfi %eax
15a11c5b 15272+ movl %esp, %eax
58c5fc13 15273+ call pax_randomize_kstack
15274+ popl_cfi %eax
15275+#endif
15276+
66a7e928 15277+ pax_erase_kstack
15278+
15279 /* if something modifies registers it must also disable sysexit */
15280 movl PT_EIP(%esp), %edx
15281 movl PT_OLDESP(%esp), %ecx
15282 xorl %ebp,%ebp
15283 TRACE_IRQS_ON
15284 1: mov PT_FS(%esp), %fs
15285+2: mov PT_DS(%esp), %ds
15286+3: mov PT_ES(%esp), %es
15287 PTGS_TO_GS
15288 ENABLE_INTERRUPTS_SYSEXIT
15289
5e856224 15290@@ -458,6 +631,9 @@ sysenter_audit:
15291 movl %eax,%edx /* 2nd arg: syscall number */
15292 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
5e856224 15293 call __audit_syscall_entry
66a7e928
MT
15294+
15295+ pax_erase_kstack
15296+
15297 pushl_cfi %ebx
15298 movl PT_EAX(%esp),%eax /* reload syscall number */
15299 jmp sysenter_do_call
5e856224 15300@@ -483,11 +659,17 @@ sysexit_audit:
15301
15302 CFI_ENDPROC
15303 .pushsection .fixup,"ax"
15304-2: movl $0,PT_FS(%esp)
15305+4: movl $0,PT_FS(%esp)
15306+ jmp 1b
15307+5: movl $0,PT_DS(%esp)
15308+ jmp 1b
15309+6: movl $0,PT_ES(%esp)
15310 jmp 1b
15311 .section __ex_table,"a"
15312 .align 4
15313- .long 1b,2b
15314+ .long 1b,4b
15315+ .long 2b,5b
15316+ .long 3b,6b
15317 .popsection
15318 PTGS_TO_GS_EX
15319 ENDPROC(ia32_sysenter_target)
5e856224 15320@@ -520,6 +702,15 @@ syscall_exit:
15321 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15322 jne syscall_exit_work
15323
15324+restore_all_pax:
15325+
58c5fc13 15326+#ifdef CONFIG_PAX_RANDKSTACK
15a11c5b 15327+ movl %esp, %eax
15328+ call pax_randomize_kstack
15329+#endif
8308f9c9 15330+
8308f9c9 15331+ pax_erase_kstack
15332+
15333 restore_all:
15334 TRACE_IRQS_IRET
15335 restore_all_notrace:
5e856224 15336@@ -579,14 +770,34 @@ ldt_ss:
15337 * compensating for the offset by changing to the ESPFIX segment with
15338 * a base address that matches for the difference.
15339 */
15340-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15341+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15342 mov %esp, %edx /* load kernel esp */
58c5fc13
MT
15343 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15344 mov %dx, %ax /* eax: new kernel esp */
15345 sub %eax, %edx /* offset (low word is 0) */
15346+#ifdef CONFIG_SMP
15347+ movl PER_CPU_VAR(cpu_number), %ebx
15348+ shll $PAGE_SHIFT_asm, %ebx
15349+ addl $cpu_gdt_table, %ebx
15350+#else
15351+ movl $cpu_gdt_table, %ebx
15352+#endif
15353 shr $16, %edx
15354- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15355- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15356+
15357+#ifdef CONFIG_PAX_KERNEXEC
15358+ mov %cr0, %esi
15359+ btr $16, %esi
15360+ mov %esi, %cr0
15361+#endif
15362+
15363+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15364+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15365+
15366+#ifdef CONFIG_PAX_KERNEXEC
15367+ bts $16, %esi
15368+ mov %esi, %cr0
15369+#endif
15370+
15371 pushl_cfi $__ESPFIX_SS
15372 pushl_cfi %eax /* new kernel esp */
15373 /* Disable interrupts, but do not irqtrace this section: we
5e856224 15374@@ -615,38 +826,30 @@ work_resched:
15375 movl TI_flags(%ebp), %ecx
15376 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15377 # than syscall tracing?
15378- jz restore_all
15379+ jz restore_all_pax
15380 testb $_TIF_NEED_RESCHED, %cl
15381 jnz work_resched
15382
15383 work_notifysig: # deal with pending signals and
15384 # notify-resume requests
15385+ movl %esp, %eax
15386 #ifdef CONFIG_VM86
15387 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15388- movl %esp, %eax
15389- jne work_notifysig_v86 # returning to kernel-space or
15390+ jz 1f # returning to kernel-space or
15391 # vm86-space
15392- TRACE_IRQS_ON
15393- ENABLE_INTERRUPTS(CLBR_NONE)
15394- xorl %edx, %edx
15395- call do_notify_resume
15396- jmp resume_userspace_sig
15397
15398- ALIGN
15399-work_notifysig_v86:
bc901d79 15400 pushl_cfi %ecx # save ti_flags for do_notify_resume
58c5fc13 15401 call save_v86_state # %eax contains pt_regs pointer
bc901d79 15402 popl_cfi %ecx
15403 movl %eax, %esp
15404-#else
15405- movl %esp, %eax
15406+1:
15407 #endif
15408 TRACE_IRQS_ON
15409 ENABLE_INTERRUPTS(CLBR_NONE)
15410 xorl %edx, %edx
15411 call do_notify_resume
15412 jmp resume_userspace_sig
15413-END(work_pending)
15414+ENDPROC(work_pending)
15415
15416 # perform syscall exit tracing
15417 ALIGN
5e856224 15418@@ -654,11 +857,14 @@ syscall_trace_entry:
15419 movl $-ENOSYS,PT_EAX(%esp)
15420 movl %esp, %eax
15421 call syscall_trace_enter
15422+
15423+ pax_erase_kstack
15424+
15425 /* What it returned is what we'll actually use. */
5e856224 15426 cmpl $(NR_syscalls), %eax
66a7e928 15427 jnae syscall_call
15428 jmp syscall_exit
15429-END(syscall_trace_entry)
15430+ENDPROC(syscall_trace_entry)
15431
15432 # perform syscall exit tracing
15433 ALIGN
5e856224 15434@@ -671,20 +877,24 @@ syscall_exit_work:
15435 movl %esp, %eax
15436 call syscall_trace_leave
15437 jmp resume_userspace
15438-END(syscall_exit_work)
15439+ENDPROC(syscall_exit_work)
15440 CFI_ENDPROC
15441
15442 RING0_INT_FRAME # can't unwind into user space anyway
15443 syscall_fault:
15444+#ifdef CONFIG_PAX_MEMORY_UDEREF
15445+ push %ss
15446+ pop %ds
15447+#endif
15448 GET_THREAD_INFO(%ebp)
15449 movl $-EFAULT,PT_EAX(%esp)
15450 jmp resume_userspace
15451-END(syscall_fault)
15452+ENDPROC(syscall_fault)
15453
15454 syscall_badsys:
15455 movl $-ENOSYS,PT_EAX(%esp)
15456 jmp resume_userspace
15457-END(syscall_badsys)
15458+ENDPROC(syscall_badsys)
15459 CFI_ENDPROC
15460 /*
15461 * End of kprobes section
5e856224 15462@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15463 CFI_ENDPROC
15464 ENDPROC(ptregs_clone)
15465
15466+ ALIGN;
15467+ENTRY(kernel_execve)
15468+ CFI_STARTPROC
15469+ pushl_cfi %ebp
15470+ sub $PT_OLDSS+4,%esp
15471+ pushl_cfi %edi
15472+ pushl_cfi %ecx
15473+ pushl_cfi %eax
15474+ lea 3*4(%esp),%edi
15475+ mov $PT_OLDSS/4+1,%ecx
15476+ xorl %eax,%eax
15477+ rep stosl
15478+ popl_cfi %eax
15479+ popl_cfi %ecx
15480+ popl_cfi %edi
15481+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15482+ pushl_cfi %esp
15483+ call sys_execve
15484+ add $4,%esp
15485+ CFI_ADJUST_CFA_OFFSET -4
15486+ GET_THREAD_INFO(%ebp)
15487+ test %eax,%eax
15488+ jz syscall_exit
15489+ add $PT_OLDSS+4,%esp
15490+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15491+ popl_cfi %ebp
15492+ ret
15493+ CFI_ENDPROC
15494+ENDPROC(kernel_execve)
15495+
15496 .macro FIXUP_ESPFIX_STACK
15497 /*
15498 * Switch back for ESPFIX stack to the normal zerobased stack
5e856224 15499@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15500 * normal stack and adjusts ESP with the matching offset.
15501 */
15502 /* fixup the stack */
15503- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15504- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
58c5fc13
MT
15505+#ifdef CONFIG_SMP
15506+ movl PER_CPU_VAR(cpu_number), %ebx
15507+ shll $PAGE_SHIFT_asm, %ebx
15508+ addl $cpu_gdt_table, %ebx
15509+#else
15510+ movl $cpu_gdt_table, %ebx
15511+#endif
15512+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15513+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
58c5fc13 15514 shl $16, %eax
6892158b 15515 addl %esp, %eax /* the adjusted stack pointer */
bc901d79 15516 pushl_cfi $__KERNEL_DS
5e856224 15517@@ -819,7 +1066,7 @@ vector=vector+1
15518 .endr
15519 2: jmp common_interrupt
15520 .endr
15521-END(irq_entries_start)
15522+ENDPROC(irq_entries_start)
15523
15524 .previous
15525 END(interrupt)
5e856224 15526@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15527 pushl_cfi $do_coprocessor_error
15528 jmp error_code
15529 CFI_ENDPROC
15530-END(coprocessor_error)
15531+ENDPROC(coprocessor_error)
15532
15533 ENTRY(simd_coprocessor_error)
15534 RING0_INT_FRAME
5e856224 15535@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15536 #endif
15537 jmp error_code
15538 CFI_ENDPROC
15539-END(simd_coprocessor_error)
15540+ENDPROC(simd_coprocessor_error)
15541
15542 ENTRY(device_not_available)
15543 RING0_INT_FRAME
5e856224 15544@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15545 pushl_cfi $do_device_not_available
15546 jmp error_code
15547 CFI_ENDPROC
15548-END(device_not_available)
15549+ENDPROC(device_not_available)
15550
15551 #ifdef CONFIG_PARAVIRT
15552 ENTRY(native_iret)
5e856224 15553@@ -905,12 +1152,12 @@ ENTRY(native_iret)
6e9df6a3
MT
15554 .align 4
15555 .long native_iret, iret_exc
15556 .previous
15557-END(native_iret)
15558+ENDPROC(native_iret)
15559
15560 ENTRY(native_irq_enable_sysexit)
15561 sti
15562 sysexit
15563-END(native_irq_enable_sysexit)
15564+ENDPROC(native_irq_enable_sysexit)
15565 #endif
15566
15567 ENTRY(overflow)
5e856224 15568@@ -919,7 +1166,7 @@ ENTRY(overflow)
6e9df6a3
MT
15569 pushl_cfi $do_overflow
15570 jmp error_code
15571 CFI_ENDPROC
15572-END(overflow)
15573+ENDPROC(overflow)
15574
15575 ENTRY(bounds)
15576 RING0_INT_FRAME
5e856224 15577@@ -927,7 +1174,7 @@ ENTRY(bounds)
6e9df6a3
MT
15578 pushl_cfi $do_bounds
15579 jmp error_code
15580 CFI_ENDPROC
15581-END(bounds)
15582+ENDPROC(bounds)
15583
15584 ENTRY(invalid_op)
15585 RING0_INT_FRAME
5e856224 15586@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
6e9df6a3
MT
15587 pushl_cfi $do_invalid_op
15588 jmp error_code
15589 CFI_ENDPROC
15590-END(invalid_op)
15591+ENDPROC(invalid_op)
15592
15593 ENTRY(coprocessor_segment_overrun)
15594 RING0_INT_FRAME
5e856224 15595@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
6e9df6a3
MT
15596 pushl_cfi $do_coprocessor_segment_overrun
15597 jmp error_code
15598 CFI_ENDPROC
15599-END(coprocessor_segment_overrun)
15600+ENDPROC(coprocessor_segment_overrun)
15601
15602 ENTRY(invalid_TSS)
15603 RING0_EC_FRAME
15604 pushl_cfi $do_invalid_TSS
15605 jmp error_code
15606 CFI_ENDPROC
15607-END(invalid_TSS)
15608+ENDPROC(invalid_TSS)
15609
15610 ENTRY(segment_not_present)
15611 RING0_EC_FRAME
15612 pushl_cfi $do_segment_not_present
15613 jmp error_code
15614 CFI_ENDPROC
15615-END(segment_not_present)
15616+ENDPROC(segment_not_present)
15617
15618 ENTRY(stack_segment)
15619 RING0_EC_FRAME
15620 pushl_cfi $do_stack_segment
15621 jmp error_code
15622 CFI_ENDPROC
15623-END(stack_segment)
15624+ENDPROC(stack_segment)
15625
15626 ENTRY(alignment_check)
15627 RING0_EC_FRAME
15628 pushl_cfi $do_alignment_check
15629 jmp error_code
15630 CFI_ENDPROC
15631-END(alignment_check)
15632+ENDPROC(alignment_check)
15633
15634 ENTRY(divide_error)
15635 RING0_INT_FRAME
5e856224 15636@@ -979,7 +1226,7 @@ ENTRY(divide_error)
6e9df6a3
MT
15637 pushl_cfi $do_divide_error
15638 jmp error_code
15639 CFI_ENDPROC
15640-END(divide_error)
15641+ENDPROC(divide_error)
15642
15643 #ifdef CONFIG_X86_MCE
15644 ENTRY(machine_check)
5e856224 15645@@ -988,7 +1235,7 @@ ENTRY(machine_check)
6e9df6a3
MT
15646 pushl_cfi machine_check_vector
15647 jmp error_code
15648 CFI_ENDPROC
15649-END(machine_check)
15650+ENDPROC(machine_check)
15651 #endif
15652
15653 ENTRY(spurious_interrupt_bug)
5e856224 15654@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
6e9df6a3
MT
15655 pushl_cfi $do_spurious_interrupt_bug
15656 jmp error_code
15657 CFI_ENDPROC
15658-END(spurious_interrupt_bug)
15659+ENDPROC(spurious_interrupt_bug)
15660 /*
15661 * End of kprobes section
15662 */
5e856224 15663@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
6e9df6a3
MT
15664
15665 ENTRY(mcount)
15666 ret
15667-END(mcount)
15668+ENDPROC(mcount)
15669
15670 ENTRY(ftrace_caller)
15671 cmpl $0, function_trace_stop
5e856224 15672@@ -1141,7 +1388,7 @@ ftrace_graph_call:
6e9df6a3
MT
15673 .globl ftrace_stub
15674 ftrace_stub:
15675 ret
15676-END(ftrace_caller)
15677+ENDPROC(ftrace_caller)
15678
15679 #else /* ! CONFIG_DYNAMIC_FTRACE */
15680
5e856224 15681@@ -1177,7 +1424,7 @@ trace:
6e9df6a3
MT
15682 popl %ecx
15683 popl %eax
15684 jmp ftrace_stub
15685-END(mcount)
15686+ENDPROC(mcount)
15687 #endif /* CONFIG_DYNAMIC_FTRACE */
15688 #endif /* CONFIG_FUNCTION_TRACER */
15689
5e856224 15690@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
6e9df6a3
MT
15691 popl %ecx
15692 popl %eax
15693 ret
15694-END(ftrace_graph_caller)
15695+ENDPROC(ftrace_graph_caller)
15696
15697 .globl return_to_handler
15698 return_to_handler:
5e856224 15699@@ -1253,15 +1500,18 @@ error_code:
58c5fc13
MT
15700 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15701 REG_TO_PTGS %ecx
15702 SET_KERNEL_GS %ecx
15703- movl $(__USER_DS), %ecx
15704+ movl $(__KERNEL_DS), %ecx
15705 movl %ecx, %ds
15706 movl %ecx, %es
df50ba0c 15707+
8308f9c9 15708+ pax_enter_kernel
df50ba0c 15709+
58c5fc13 15710 TRACE_IRQS_OFF
df50ba0c
MT
15711 movl %esp,%eax # pt_regs pointer
15712 call *%edi
6e9df6a3
MT
15713 jmp ret_from_exception
15714 CFI_ENDPROC
15715-END(page_fault)
15716+ENDPROC(page_fault)
15717
15718 /*
15719 * Debug traps and NMI can happen at the one SYSENTER instruction
5e856224 15720@@ -1303,7 +1553,7 @@ debug_stack_correct:
6e9df6a3
MT
15721 call do_debug
15722 jmp ret_from_exception
15723 CFI_ENDPROC
15724-END(debug)
15725+ENDPROC(debug)
15726
15727 /*
15728 * NMI is doubly nasty. It can happen _while_ we're handling
5e856224 15729@@ -1340,6 +1590,9 @@ nmi_stack_correct:
58c5fc13
MT
15730 xorl %edx,%edx # zero error code
15731 movl %esp,%eax # pt_regs pointer
15732 call do_nmi
15733+
8308f9c9 15734+ pax_exit_kernel
58c5fc13
MT
15735+
15736 jmp restore_all_notrace
15737 CFI_ENDPROC
15738
5e856224 15739@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
58c5fc13
MT
15740 FIXUP_ESPFIX_STACK # %eax == %esp
15741 xorl %edx,%edx # zero error code
15742 call do_nmi
15743+
8308f9c9 15744+ pax_exit_kernel
58c5fc13
MT
15745+
15746 RESTORE_REGS
15747 lss 12+4(%esp), %esp # back to espfix stack
15748 CFI_ADJUST_CFA_OFFSET -24
6e9df6a3
MT
15749 jmp irq_return
15750 CFI_ENDPROC
15751-END(nmi)
15752+ENDPROC(nmi)
15753
15754 ENTRY(int3)
15755 RING0_INT_FRAME
5e856224 15756@@ -1393,14 +1649,14 @@ ENTRY(int3)
6e9df6a3
MT
15757 call do_int3
15758 jmp ret_from_exception
15759 CFI_ENDPROC
15760-END(int3)
15761+ENDPROC(int3)
15762
15763 ENTRY(general_protection)
15764 RING0_EC_FRAME
15765 pushl_cfi $do_general_protection
15766 jmp error_code
15767 CFI_ENDPROC
15768-END(general_protection)
15769+ENDPROC(general_protection)
15770
15771 #ifdef CONFIG_KVM_GUEST
15772 ENTRY(async_page_fault)
5e856224 15773@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
6e9df6a3
MT
15774 pushl_cfi $do_async_page_fault
15775 jmp error_code
15776 CFI_ENDPROC
15777-END(async_page_fault)
15778+ENDPROC(async_page_fault)
15779 #endif
15780
15781 /*
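
Side note on the entry_32.S hunks above: the FIXUP_ESPFIX_STACK change stops reading the ESPFIX descriptor out of a single boot-time GDT and instead indexes a per-CPU cpu_gdt_table, adding cpu_number shifted by PAGE_SHIFT_asm to the table base (one GDT page per CPU). A minimal C sketch of just that address arithmetic follows; cpu_gdt_table, the shift-by-PAGE_SHIFT indexing and the one-page-per-CPU layout come from the patch, while the descriptor index and everything else here are made-up illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12          /* one page-sized GDT per CPU, as in the patch */
    #define ESPFIX_ENTRY 26          /* hypothetical descriptor index, illustration only */

    static uint64_t cpu_gdt_table[4][512];   /* 4 mock CPUs, 4096 bytes of GDT each */

    /* Mirrors: movl PER_CPU_VAR(cpu_number),%ebx; shll $PAGE_SHIFT_asm,%ebx; addl $cpu_gdt_table,%ebx */
    static uint64_t *espfix_desc(unsigned int cpu)
    {
        uint8_t *base = (uint8_t *)cpu_gdt_table + ((size_t)cpu << PAGE_SHIFT);
        return (uint64_t *)(base + ESPFIX_ENTRY * 8);
    }

    int main(void)
    {
        printf("CPU 1 ESPFIX descriptor sits at %p\n", (void *)espfix_desc(1));
        return 0;
    }

The same cpu_number-times-page-size indexing reappears in the head_32.S hunks further down, where cpu_gdt_table is laid out as NR_CPUS page-sized copies.
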
fe2de317 15782diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
5e856224 15783index 1333d98..b340ca2 100644
fe2de317
MT
15784--- a/arch/x86/kernel/entry_64.S
15785+++ b/arch/x86/kernel/entry_64.S
5e856224 15786@@ -56,6 +56,8 @@
ae4e228f
MT
15787 #include <asm/ftrace.h>
15788 #include <asm/percpu.h>
5e856224 15789 #include <linux/err.h>
ae4e228f 15790+#include <asm/pgtable.h>
6e9df6a3 15791+#include <asm/alternative-asm.h>
ae4e228f
MT
15792
15793 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15794 #include <linux/elf-em.h>
5e856224 15795@@ -69,8 +71,9 @@
6e9df6a3
MT
15796 #ifdef CONFIG_FUNCTION_TRACER
15797 #ifdef CONFIG_DYNAMIC_FTRACE
15798 ENTRY(mcount)
15799+ pax_force_retaddr
15800 retq
15801-END(mcount)
15802+ENDPROC(mcount)
15803
15804 ENTRY(ftrace_caller)
15805 cmpl $0, function_trace_stop
5e856224 15806@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
6e9df6a3
MT
15807 #endif
15808
15809 GLOBAL(ftrace_stub)
15810+ pax_force_retaddr
15811 retq
15812-END(ftrace_caller)
15813+ENDPROC(ftrace_caller)
15814
15815 #else /* ! CONFIG_DYNAMIC_FTRACE */
15816 ENTRY(mcount)
5e856224 15817@@ -113,6 +117,7 @@ ENTRY(mcount)
6e9df6a3
MT
15818 #endif
15819
15820 GLOBAL(ftrace_stub)
15821+ pax_force_retaddr
15822 retq
15823
15824 trace:
5e856224 15825@@ -122,12 +127,13 @@ trace:
6e9df6a3
MT
15826 movq 8(%rbp), %rsi
15827 subq $MCOUNT_INSN_SIZE, %rdi
15828
15829+ pax_force_fptr ftrace_trace_function
15830 call *ftrace_trace_function
15831
15832 MCOUNT_RESTORE_FRAME
15833
15834 jmp ftrace_stub
15835-END(mcount)
15836+ENDPROC(mcount)
15837 #endif /* CONFIG_DYNAMIC_FTRACE */
15838 #endif /* CONFIG_FUNCTION_TRACER */
15839
5e856224 15840@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
6e9df6a3
MT
15841
15842 MCOUNT_RESTORE_FRAME
15843
15844+ pax_force_retaddr
15845 retq
15846-END(ftrace_graph_caller)
15847+ENDPROC(ftrace_graph_caller)
15848
15849 GLOBAL(return_to_handler)
15850 subq $24, %rsp
5e856224 15851@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
6e9df6a3
MT
15852 movq 8(%rsp), %rdx
15853 movq (%rsp), %rax
15854 addq $24, %rsp
15855+ pax_force_fptr %rdi
15856 jmp *%rdi
15857 #endif
15858
5e856224 15859@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
df50ba0c
MT
15860 ENDPROC(native_usergs_sysret64)
15861 #endif /* CONFIG_PARAVIRT */
15862
15863+ .macro ljmpq sel, off
8308f9c9 15864+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
df50ba0c
MT
15865+ .byte 0x48; ljmp *1234f(%rip)
15866+ .pushsection .rodata
15867+ .align 16
15868+ 1234: .quad \off; .word \sel
15869+ .popsection
15870+#else
66a7e928
MT
15871+ pushq $\sel
15872+ pushq $\off
df50ba0c
MT
15873+ lretq
15874+#endif
15875+ .endm
15876+
317566c1 15877+ .macro pax_enter_kernel
fe2de317 15878+ pax_set_fptr_mask
317566c1
MT
15879+#ifdef CONFIG_PAX_KERNEXEC
15880+ call pax_enter_kernel
15881+#endif
15882+ .endm
15883+
15884+ .macro pax_exit_kernel
15885+#ifdef CONFIG_PAX_KERNEXEC
15886+ call pax_exit_kernel
15887+#endif
15888+ .endm
df50ba0c
MT
15889+
15890+#ifdef CONFIG_PAX_KERNEXEC
317566c1 15891+ENTRY(pax_enter_kernel)
66a7e928 15892+ pushq %rdi
df50ba0c
MT
15893+
15894+#ifdef CONFIG_PARAVIRT
15895+ PV_SAVE_REGS(CLBR_RDI)
15896+#endif
15897+
15898+ GET_CR0_INTO_RDI
15899+ bts $16,%rdi
fe2de317 15900+ jnc 3f
df50ba0c
MT
15901+ mov %cs,%edi
15902+ cmp $__KERNEL_CS,%edi
fe2de317
MT
15903+ jnz 2f
15904+1:
df50ba0c
MT
15905+
15906+#ifdef CONFIG_PARAVIRT
15907+ PV_RESTORE_REGS(CLBR_RDI)
15908+#endif
15909+
66a7e928 15910+ popq %rdi
6e9df6a3 15911+ pax_force_retaddr
df50ba0c 15912+ retq
fe2de317
MT
15913+
15914+2: ljmpq __KERNEL_CS,1f
15915+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15916+4: SET_RDI_INTO_CR0
15917+ jmp 1b
df50ba0c
MT
15918+ENDPROC(pax_enter_kernel)
15919+
15920+ENTRY(pax_exit_kernel)
66a7e928 15921+ pushq %rdi
df50ba0c
MT
15922+
15923+#ifdef CONFIG_PARAVIRT
15924+ PV_SAVE_REGS(CLBR_RDI)
15925+#endif
15926+
15927+ mov %cs,%rdi
15928+ cmp $__KERNEXEC_KERNEL_CS,%edi
fe2de317
MT
15929+ jz 2f
15930+1:
15931+
15932+#ifdef CONFIG_PARAVIRT
15933+ PV_RESTORE_REGS(CLBR_RDI);
15934+#endif
15935+
15936+ popq %rdi
15937+ pax_force_retaddr
15938+ retq
df50ba0c 15939+
fe2de317
MT
15940+2: GET_CR0_INTO_RDI
15941+ btr $16,%rdi
15942+ ljmpq __KERNEL_CS,3f
15943+3: SET_RDI_INTO_CR0
15944+ jmp 1b
df50ba0c
MT
15945+#ifdef CONFIG_PARAVIRT
15946+ PV_RESTORE_REGS(CLBR_RDI);
15947+#endif
15948+
66a7e928 15949+ popq %rdi
6e9df6a3 15950+ pax_force_retaddr
df50ba0c
MT
15951+ retq
15952+ENDPROC(pax_exit_kernel)
317566c1 15953+#endif
df50ba0c 15954+
317566c1 15955+ .macro pax_enter_kernel_user
fe2de317 15956+ pax_set_fptr_mask
317566c1
MT
15957+#ifdef CONFIG_PAX_MEMORY_UDEREF
15958+ call pax_enter_kernel_user
15959+#endif
15960+ .endm
df50ba0c 15961+
317566c1 15962+ .macro pax_exit_kernel_user
df50ba0c 15963+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1
MT
15964+ call pax_exit_kernel_user
15965+#endif
71d190be 15966+#ifdef CONFIG_PAX_RANDKSTACK
4c928ab7 15967+ pushq %rax
71d190be 15968+ call pax_randomize_kstack
4c928ab7 15969+ popq %rax
71d190be 15970+#endif
317566c1
MT
15971+ .endm
15972+
15973+#ifdef CONFIG_PAX_MEMORY_UDEREF
15974+ENTRY(pax_enter_kernel_user)
66a7e928
MT
15975+ pushq %rdi
15976+ pushq %rbx
df50ba0c
MT
15977+
15978+#ifdef CONFIG_PARAVIRT
15979+ PV_SAVE_REGS(CLBR_RDI)
15980+#endif
15981+
15982+ GET_CR3_INTO_RDI
15983+ mov %rdi,%rbx
15984+ add $__START_KERNEL_map,%rbx
15985+ sub phys_base(%rip),%rbx
15986+
15987+#ifdef CONFIG_PARAVIRT
66a7e928 15988+ pushq %rdi
df50ba0c
MT
15989+ cmpl $0, pv_info+PARAVIRT_enabled
15990+ jz 1f
15991+ i = 0
15992+ .rept USER_PGD_PTRS
15993+ mov i*8(%rbx),%rsi
15994+ mov $0,%sil
15995+ lea i*8(%rbx),%rdi
15a11c5b 15996+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
df50ba0c
MT
15997+ i = i + 1
15998+ .endr
15999+ jmp 2f
16000+1:
16001+#endif
16002+
16003+ i = 0
16004+ .rept USER_PGD_PTRS
16005+ movb $0,i*8(%rbx)
16006+ i = i + 1
16007+ .endr
16008+
16009+#ifdef CONFIG_PARAVIRT
66a7e928 16010+2: popq %rdi
df50ba0c
MT
16011+#endif
16012+ SET_RDI_INTO_CR3
16013+
16014+#ifdef CONFIG_PAX_KERNEXEC
16015+ GET_CR0_INTO_RDI
16016+ bts $16,%rdi
16017+ SET_RDI_INTO_CR0
16018+#endif
16019+
16020+#ifdef CONFIG_PARAVIRT
16021+ PV_RESTORE_REGS(CLBR_RDI)
16022+#endif
16023+
66a7e928
MT
16024+ popq %rbx
16025+ popq %rdi
6e9df6a3 16026+ pax_force_retaddr
df50ba0c
MT
16027+ retq
16028+ENDPROC(pax_enter_kernel_user)
16029+
16030+ENTRY(pax_exit_kernel_user)
df50ba0c
MT
16031+ push %rdi
16032+
16033+#ifdef CONFIG_PARAVIRT
66a7e928 16034+ pushq %rbx
df50ba0c
MT
16035+ PV_SAVE_REGS(CLBR_RDI)
16036+#endif
16037+
16038+#ifdef CONFIG_PAX_KERNEXEC
16039+ GET_CR0_INTO_RDI
16040+ btr $16,%rdi
16041+ SET_RDI_INTO_CR0
16042+#endif
16043+
16044+ GET_CR3_INTO_RDI
16045+ add $__START_KERNEL_map,%rdi
16046+ sub phys_base(%rip),%rdi
16047+
16048+#ifdef CONFIG_PARAVIRT
16049+ cmpl $0, pv_info+PARAVIRT_enabled
16050+ jz 1f
16051+ mov %rdi,%rbx
16052+ i = 0
16053+ .rept USER_PGD_PTRS
16054+ mov i*8(%rbx),%rsi
16055+ mov $0x67,%sil
16056+ lea i*8(%rbx),%rdi
15a11c5b 16057+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
df50ba0c
MT
16058+ i = i + 1
16059+ .endr
16060+ jmp 2f
16061+1:
16062+#endif
16063+
16064+ i = 0
16065+ .rept USER_PGD_PTRS
16066+ movb $0x67,i*8(%rdi)
16067+ i = i + 1
16068+ .endr
16069+
16070+#ifdef CONFIG_PARAVIRT
16071+2: PV_RESTORE_REGS(CLBR_RDI)
66a7e928 16072+ popq %rbx
df50ba0c
MT
16073+#endif
16074+
66a7e928 16075+ popq %rdi
6e9df6a3 16076+ pax_force_retaddr
df50ba0c
MT
16077+ retq
16078+ENDPROC(pax_exit_kernel_user)
66a7e928
MT
16079+#endif
16080+
6e9df6a3 16081+.macro pax_erase_kstack
66a7e928
MT
16082+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16083+ call pax_erase_kstack
16084+#endif
6e9df6a3 16085+.endm
66a7e928
MT
16086+
16087+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16088+/*
fe2de317 16089+ * r11: thread_info
66a7e928
MT
16090+ * rcx, rdx: can be clobbered
16091+ */
16092+ENTRY(pax_erase_kstack)
16093+ pushq %rdi
16094+ pushq %rax
fe2de317 16095+ pushq %r11
66a7e928 16096+
fe2de317
MT
16097+ GET_THREAD_INFO(%r11)
16098+ mov TI_lowest_stack(%r11), %rdi
66a7e928
MT
16099+ mov $-0xBEEF, %rax
16100+ std
16101+
16102+1: mov %edi, %ecx
16103+ and $THREAD_SIZE_asm - 1, %ecx
16104+ shr $3, %ecx
16105+ repne scasq
16106+ jecxz 2f
16107+
16108+ cmp $2*8, %ecx
16109+ jc 2f
16110+
16111+ mov $2*8, %ecx
16112+ repe scasq
16113+ jecxz 2f
16114+ jne 1b
16115+
16116+2: cld
16117+ mov %esp, %ecx
16118+ sub %edi, %ecx
15a11c5b
MT
16119+
16120+ cmp $THREAD_SIZE_asm, %rcx
16121+ jb 3f
16122+ ud2
16123+3:
16124+
66a7e928
MT
16125+ shr $3, %ecx
16126+ rep stosq
16127+
fe2de317 16128+ mov TI_task_thread_sp0(%r11), %rdi
66a7e928 16129+ sub $256, %rdi
fe2de317 16130+ mov %rdi, TI_lowest_stack(%r11)
66a7e928 16131+
fe2de317 16132+ popq %r11
66a7e928
MT
16133+ popq %rax
16134+ popq %rdi
6e9df6a3 16135+ pax_force_retaddr
66a7e928
MT
16136+ ret
16137+ENDPROC(pax_erase_kstack)
317566c1 16138+#endif
df50ba0c
MT
16139
16140 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16141 #ifdef CONFIG_TRACE_IRQFLAGS
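
The pax_enter_kernel_user / pax_exit_kernel_user pair added above is the UDEREF top-level trick: on entry to the kernel the USER_PGD_PTRS user-half entries of the active PGD get their flag byte cleared (movb $0), so user mappings are simply not present while kernel code runs, and on the way back to userland the flag byte is set back to 0x67 (present, writable, user, accessed, dirty). A toy C model of that toggling; USER_PGD_PTRS as the user half and the 0x67 value are taken from the patch, the rest is invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define USER_PGD_PTRS  256        /* low half of a 512-entry x86-64 PGD */
    #define PGD_USER_FLAGS 0x67       /* value written back by pax_exit_kernel_user */

    static uint64_t pgd[512];          /* toy top-level page table */

    /* Kernel entry under UDEREF: make every userland top-level entry non-present. */
    static void enter_kernel_user(void)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            pgd[i] &= ~0xffULL;        /* mirrors: movb $0, i*8(%rbx) */
    }

    /* Return to userland: put the access flags back. */
    static void exit_kernel_user(void)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            pgd[i] = (pgd[i] & ~0xffULL) | PGD_USER_FLAGS;  /* mirrors: movb $0x67, i*8(%rdi) */
    }

    int main(void)
    {
        pgd[0] = 0x123456000ULL | PGD_USER_FLAGS;
        enter_kernel_user();
        printf("in kernel:   entry 0 = %#llx (not present)\n", (unsigned long long)pgd[0]);
        exit_kernel_user();
        printf("back to user: entry 0 = %#llx\n", (unsigned long long)pgd[0]);
        return 0;
    }

The paravirt branch in the real code does the same per-entry update through the pv_mmu_ops set_pgd_batched hook instead of direct byte stores.
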
5e856224 16142@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
fe2de317
MT
16143 .endm
16144
16145 .macro UNFAKE_STACK_FRAME
16146- addq $8*6, %rsp
16147- CFI_ADJUST_CFA_OFFSET -(6*8)
16148+ addq $8*6 + ARG_SKIP, %rsp
16149+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16150 .endm
16151
16152 /*
5e856224 16153@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
6e9df6a3
MT
16154 movq %rsp, %rsi
16155
16156 leaq -RBP(%rsp),%rdi /* arg1 for handler */
df50ba0c
MT
16157- testl $3, CS(%rdi)
16158+ testb $3, CS(%rdi)
16159 je 1f
16160 SWAPGS
16161 /*
5e856224 16162@@ -356,9 +640,10 @@ ENTRY(save_rest)
6e9df6a3
MT
16163 movq_cfi r15, R15+16
16164 movq %r11, 8(%rsp) /* return address */
16165 FIXUP_TOP_OF_STACK %r11, 16
16166+ pax_force_retaddr
16167 ret
16168 CFI_ENDPROC
16169-END(save_rest)
16170+ENDPROC(save_rest)
16171
16172 /* save complete stack frame */
16173 .pushsection .kprobes.text, "ax"
5e856224 16174@@ -387,9 +672,10 @@ ENTRY(save_paranoid)
6e9df6a3
MT
16175 js 1f /* negative -> in kernel */
16176 SWAPGS
16177 xorl %ebx,%ebx
16178-1: ret
fe2de317 16179+1: pax_force_retaddr_bts
6e9df6a3
MT
16180+ ret
16181 CFI_ENDPROC
16182-END(save_paranoid)
16183+ENDPROC(save_paranoid)
16184 .popsection
16185
16186 /*
5e856224 16187@@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
df50ba0c
MT
16188
16189 RESTORE_REST
16190
16191- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16192+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
5e856224 16193 jz retint_restore_args
df50ba0c
MT
16194
16195 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
5e856224 16196@@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
6e9df6a3
MT
16197 jmp ret_from_sys_call # go to the SYSRET fastpath
16198
16199 CFI_ENDPROC
16200-END(ret_from_fork)
16201+ENDPROC(ret_from_fork)
16202
16203 /*
16204 * System call entry. Up to 6 arguments in registers are supported.
5e856224 16205@@ -457,7 +743,7 @@ END(ret_from_fork)
71d190be
MT
16206 ENTRY(system_call)
16207 CFI_STARTPROC simple
16208 CFI_SIGNAL_FRAME
16209- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16210+ CFI_DEF_CFA rsp,0
16211 CFI_REGISTER rip,rcx
16212 /*CFI_REGISTER rflags,r11*/
16213 SWAPGS_UNSAFE_STACK
5e856224 16214@@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
df50ba0c
MT
16215
16216 movq %rsp,PER_CPU_VAR(old_rsp)
16217 movq PER_CPU_VAR(kernel_stack),%rsp
fe2de317 16218+ SAVE_ARGS 8*6,0
317566c1 16219+ pax_enter_kernel_user
df50ba0c
MT
16220 /*
16221 * No need to follow this irqs off/on section - it's straight
16222 * and short:
71d190be
MT
16223 */
16224 ENABLE_INTERRUPTS(CLBR_NONE)
6e9df6a3 16225- SAVE_ARGS 8,0
71d190be
MT
16226 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16227 movq %rcx,RIP-ARGOFFSET(%rsp)
16228 CFI_REL_OFFSET rip,RIP-ARGOFFSET
5e856224
MT
16229- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16230+ GET_THREAD_INFO(%rcx)
16231+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16232 jnz tracesys
fe2de317
MT
16233 system_call_fastpath:
16234 cmpq $__NR_syscall_max,%rax
16235 ja badsys
16236- movq %r10,%rcx
16237+ movq R10-ARGOFFSET(%rsp),%rcx
16238 call *sys_call_table(,%rax,8) # XXX: rip relative
16239 movq %rax,RAX-ARGOFFSET(%rsp)
16240 /*
5e856224
MT
16241@@ -498,10 +786,13 @@ sysret_check:
16242 LOCKDEP_SYS_EXIT
16243 DISABLE_INTERRUPTS(CLBR_NONE)
16244 TRACE_IRQS_OFF
16245- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16246+ GET_THREAD_INFO(%rcx)
16247+ movl TI_flags(%rcx),%edx
df50ba0c
MT
16248 andl %edi,%edx
16249 jnz sysret_careful
16250 CFI_REMEMBER_STATE
317566c1 16251+ pax_exit_kernel_user
15a11c5b 16252+ pax_erase_kstack
df50ba0c
MT
16253 /*
16254 * sysretq will re-enable interrupts:
16255 */
5e856224 16256@@ -553,14 +844,18 @@ badsys:
fe2de317
MT
16257 * jump back to the normal fast path.
16258 */
16259 auditsys:
16260- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16261+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16262 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16263 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16264 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
66a7e928
MT
16265 movq %rax,%rsi /* 2nd arg: syscall number */
16266 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
5e856224 16267 call __audit_syscall_entry
66a7e928
MT
16268+
16269+ pax_erase_kstack
16270+
16271 LOAD_ARGS 0 /* reload call-clobbered registers */
fe2de317 16272+ pax_set_fptr_mask
66a7e928
MT
16273 jmp system_call_fastpath
16274
fe2de317 16275 /*
5e856224
MT
16276@@ -581,7 +876,7 @@ sysret_audit:
16277 /* Do syscall tracing */
16278 tracesys:
16279 #ifdef CONFIG_AUDITSYSCALL
16280- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16281+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16282 jz auditsys
16283 #endif
16284 SAVE_REST
16285@@ -589,16 +884,20 @@ tracesys:
66a7e928
MT
16286 FIXUP_TOP_OF_STACK %rdi
16287 movq %rsp,%rdi
16288 call syscall_trace_enter
16289+
16290+ pax_erase_kstack
16291+
16292 /*
16293 * Reload arg registers from stack in case ptrace changed them.
16294 * We don't reload %rax because syscall_trace_enter() returned
fe2de317
MT
16295 * the value it wants us to use in the table lookup.
16296 */
16297 LOAD_ARGS ARGOFFSET, 1
16298+ pax_set_fptr_mask
16299 RESTORE_REST
16300 cmpq $__NR_syscall_max,%rax
16301 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16302- movq %r10,%rcx /* fixup for C */
16303+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16304 call *sys_call_table(,%rax,8)
16305 movq %rax,RAX-ARGOFFSET(%rsp)
16306 /* Use IRET because user could have changed frame */
5e856224 16307@@ -619,6 +918,7 @@ GLOBAL(int_with_check)
4c928ab7
MT
16308 andl %edi,%edx
16309 jnz int_careful
16310 andl $~TS_COMPAT,TI_status(%rcx)
16311+ pax_erase_kstack
16312 jmp retint_swapgs
16313
16314 /* Either reschedule or signal or syscall exit tracking needed. */
5e856224 16315@@ -665,7 +965,7 @@ int_restore_rest:
6e9df6a3
MT
16316 TRACE_IRQS_OFF
16317 jmp int_with_check
16318 CFI_ENDPROC
16319-END(system_call)
16320+ENDPROC(system_call)
16321
16322 /*
16323 * Certain special system calls that need to save a complete full stack frame.
5e856224 16324@@ -681,7 +981,7 @@ ENTRY(\label)
6e9df6a3
MT
16325 call \func
16326 jmp ptregscall_common
16327 CFI_ENDPROC
16328-END(\label)
16329+ENDPROC(\label)
16330 .endm
16331
16332 PTREGSCALL stub_clone, sys_clone, %r8
5e856224 16333@@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
6e9df6a3
MT
16334 movq_cfi_restore R12+8, r12
16335 movq_cfi_restore RBP+8, rbp
16336 movq_cfi_restore RBX+8, rbx
16337+ pax_force_retaddr
16338 ret $REST_SKIP /* pop extended registers */
16339 CFI_ENDPROC
16340-END(ptregscall_common)
16341+ENDPROC(ptregscall_common)
16342
16343 ENTRY(stub_execve)
16344 CFI_STARTPROC
5e856224 16345@@ -716,7 +1017,7 @@ ENTRY(stub_execve)
6e9df6a3
MT
16346 RESTORE_REST
16347 jmp int_ret_from_sys_call
16348 CFI_ENDPROC
16349-END(stub_execve)
16350+ENDPROC(stub_execve)
16351
16352 /*
16353 * sigreturn is special because it needs to restore all registers on return.
5e856224 16354@@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
6e9df6a3
MT
16355 RESTORE_REST
16356 jmp int_ret_from_sys_call
16357 CFI_ENDPROC
16358-END(stub_rt_sigreturn)
16359+ENDPROC(stub_rt_sigreturn)
16360
16361 /*
16362 * Build the entry stubs and pointer table with some assembler magic.
5e856224 16363@@ -769,7 +1070,7 @@ vector=vector+1
6e9df6a3
MT
16364 2: jmp common_interrupt
16365 .endr
16366 CFI_ENDPROC
16367-END(irq_entries_start)
16368+ENDPROC(irq_entries_start)
16369
16370 .previous
16371 END(interrupt)
5e856224 16372@@ -789,6 +1090,16 @@ END(interrupt)
4c928ab7 16373 subq $ORIG_RAX-RBP, %rsp
16454cff 16374 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
6e9df6a3 16375 SAVE_ARGS_IRQ
df50ba0c
MT
16376+#ifdef CONFIG_PAX_MEMORY_UDEREF
16377+ testb $3, CS(%rdi)
16378+ jnz 1f
317566c1 16379+ pax_enter_kernel
df50ba0c 16380+ jmp 2f
317566c1 16381+1: pax_enter_kernel_user
df50ba0c
MT
16382+2:
16383+#else
317566c1 16384+ pax_enter_kernel
df50ba0c 16385+#endif
ae4e228f
MT
16386 call \func
16387 .endm
16388
5e856224 16389@@ -820,7 +1131,7 @@ ret_from_intr:
6e9df6a3 16390
ae4e228f 16391 exit_intr:
ae4e228f 16392 GET_THREAD_INFO(%rcx)
df50ba0c
MT
16393- testl $3,CS-ARGOFFSET(%rsp)
16394+ testb $3,CS-ARGOFFSET(%rsp)
ae4e228f 16395 je retint_kernel
df50ba0c
MT
16396
16397 /* Interrupt came from user space */
5e856224 16398@@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
df50ba0c
MT
16399 * The iretq could re-enable interrupts:
16400 */
16401 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 16402+ pax_exit_kernel_user
df50ba0c
MT
16403 TRACE_IRQS_IRETQ
16404 SWAPGS
16405 jmp restore_args
16406
16407 retint_restore_args: /* return to kernel space */
16408 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 16409+ pax_exit_kernel
6e9df6a3 16410+ pax_force_retaddr RIP-ARGOFFSET
df50ba0c
MT
16411 /*
16412 * The iretq could re-enable interrupts:
16413 */
5e856224 16414@@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
6e9df6a3
MT
16415 #endif
16416
16417 CFI_ENDPROC
16418-END(common_interrupt)
16419+ENDPROC(common_interrupt)
16420 /*
16421 * End of kprobes section
16422 */
5e856224 16423@@ -953,7 +1267,7 @@ ENTRY(\sym)
6e9df6a3
MT
16424 interrupt \do_sym
16425 jmp ret_from_intr
16426 CFI_ENDPROC
16427-END(\sym)
16428+ENDPROC(\sym)
16429 .endm
16430
16431 #ifdef CONFIG_SMP
5e856224 16432@@ -1026,12 +1340,22 @@ ENTRY(\sym)
bc901d79 16433 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16434 call error_entry
16435 DEFAULT_FRAME 0
df50ba0c
MT
16436+#ifdef CONFIG_PAX_MEMORY_UDEREF
16437+ testb $3, CS(%rsp)
16438+ jnz 1f
317566c1 16439+ pax_enter_kernel
df50ba0c 16440+ jmp 2f
317566c1 16441+1: pax_enter_kernel_user
df50ba0c
MT
16442+2:
16443+#else
317566c1 16444+ pax_enter_kernel
df50ba0c 16445+#endif
ae4e228f
MT
16446 movq %rsp,%rdi /* pt_regs pointer */
16447 xorl %esi,%esi /* no error code */
16448 call \do_sym
6e9df6a3
MT
16449 jmp error_exit /* %ebx: no swapgs flag */
16450 CFI_ENDPROC
16451-END(\sym)
16452+ENDPROC(\sym)
16453 .endm
16454
16455 .macro paranoidzeroentry sym do_sym
5e856224 16456@@ -1043,15 +1367,25 @@ ENTRY(\sym)
bc901d79 16457 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16458 call save_paranoid
16459 TRACE_IRQS_OFF
df50ba0c
MT
16460+#ifdef CONFIG_PAX_MEMORY_UDEREF
16461+ testb $3, CS(%rsp)
16462+ jnz 1f
317566c1 16463+ pax_enter_kernel
df50ba0c 16464+ jmp 2f
317566c1 16465+1: pax_enter_kernel_user
df50ba0c
MT
16466+2:
16467+#else
317566c1 16468+ pax_enter_kernel
df50ba0c 16469+#endif
ae4e228f
MT
16470 movq %rsp,%rdi /* pt_regs pointer */
16471 xorl %esi,%esi /* no error code */
16472 call \do_sym
6e9df6a3
MT
16473 jmp paranoid_exit /* %ebx: no swapgs flag */
16474 CFI_ENDPROC
16475-END(\sym)
16476+ENDPROC(\sym)
6892158b
MT
16477 .endm
16478
16479-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16480+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16481 .macro paranoidzeroentry_ist sym do_sym ist
16482 ENTRY(\sym)
16483 INTR_FRAME
5e856224 16484@@ -1061,14 +1395,30 @@ ENTRY(\sym)
bc901d79 16485 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f 16486 call save_paranoid
58c5fc13 16487 TRACE_IRQS_OFF
df50ba0c
MT
16488+#ifdef CONFIG_PAX_MEMORY_UDEREF
16489+ testb $3, CS(%rsp)
16490+ jnz 1f
317566c1 16491+ pax_enter_kernel
df50ba0c 16492+ jmp 2f
317566c1 16493+1: pax_enter_kernel_user
df50ba0c
MT
16494+2:
16495+#else
317566c1 16496+ pax_enter_kernel
df50ba0c 16497+#endif
58c5fc13
MT
16498 movq %rsp,%rdi /* pt_regs pointer */
16499 xorl %esi,%esi /* no error code */
58c5fc13 16500+#ifdef CONFIG_SMP
ae4e228f
MT
16501+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16502+ lea init_tss(%r12), %r12
58c5fc13 16503+#else
ae4e228f 16504+ lea init_tss(%rip), %r12
58c5fc13 16505+#endif
6892158b 16506 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
58c5fc13 16507 call \do_sym
6892158b 16508 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
6e9df6a3
MT
16509 jmp paranoid_exit /* %ebx: no swapgs flag */
16510 CFI_ENDPROC
16511-END(\sym)
16512+ENDPROC(\sym)
16513 .endm
16514
16515 .macro errorentry sym do_sym
5e856224 16516@@ -1079,13 +1429,23 @@ ENTRY(\sym)
bc901d79 16517 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16518 call error_entry
16519 DEFAULT_FRAME 0
df50ba0c
MT
16520+#ifdef CONFIG_PAX_MEMORY_UDEREF
16521+ testb $3, CS(%rsp)
16522+ jnz 1f
317566c1 16523+ pax_enter_kernel
df50ba0c 16524+ jmp 2f
317566c1 16525+1: pax_enter_kernel_user
df50ba0c
MT
16526+2:
16527+#else
317566c1 16528+ pax_enter_kernel
df50ba0c 16529+#endif
ae4e228f
MT
16530 movq %rsp,%rdi /* pt_regs pointer */
16531 movq ORIG_RAX(%rsp),%rsi /* get error code */
16532 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
16533 call \do_sym
16534 jmp error_exit /* %ebx: no swapgs flag */
16535 CFI_ENDPROC
16536-END(\sym)
16537+ENDPROC(\sym)
16538 .endm
16539
16540 /* error code is on the stack already */
5e856224 16541@@ -1098,13 +1458,23 @@ ENTRY(\sym)
ae4e228f
MT
16542 call save_paranoid
16543 DEFAULT_FRAME 0
16544 TRACE_IRQS_OFF
df50ba0c
MT
16545+#ifdef CONFIG_PAX_MEMORY_UDEREF
16546+ testb $3, CS(%rsp)
16547+ jnz 1f
317566c1 16548+ pax_enter_kernel
df50ba0c 16549+ jmp 2f
317566c1 16550+1: pax_enter_kernel_user
df50ba0c
MT
16551+2:
16552+#else
317566c1 16553+ pax_enter_kernel
df50ba0c 16554+#endif
ae4e228f
MT
16555 movq %rsp,%rdi /* pt_regs pointer */
16556 movq ORIG_RAX(%rsp),%rsi /* get error code */
16557 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
16558 call \do_sym
16559 jmp paranoid_exit /* %ebx: no swapgs flag */
16560 CFI_ENDPROC
16561-END(\sym)
16562+ENDPROC(\sym)
16563 .endm
16564
16565 zeroentry divide_error do_divide_error
5e856224 16566@@ -1134,9 +1504,10 @@ gs_change:
6e9df6a3
MT
16567 2: mfence /* workaround */
16568 SWAPGS
16569 popfq_cfi
16570+ pax_force_retaddr
16571 ret
16572 CFI_ENDPROC
16573-END(native_load_gs_index)
16574+ENDPROC(native_load_gs_index)
16575
16576 .section __ex_table,"a"
16577 .align 8
5e856224 16578@@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
6e9df6a3
MT
16579 * Here we are in the child and the registers are set as they were
16580 * at kernel_thread() invocation in the parent.
16581 */
16582+ pax_force_fptr %rsi
16583 call *%rsi
16584 # exit
16585 mov %eax, %edi
16586 call do_exit
16587 ud2 # padding for call trace
16588 CFI_ENDPROC
16589-END(kernel_thread_helper)
16590+ENDPROC(kernel_thread_helper)
16591
16592 /*
16593 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
5e856224 16594@@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
fe2de317
MT
16595 RESTORE_REST
16596 testq %rax,%rax
6e9df6a3 16597 je int_ret_from_sys_call
fe2de317 16598- RESTORE_ARGS
6e9df6a3
MT
16599 UNFAKE_STACK_FRAME
16600+ pax_force_retaddr
16601 ret
16602 CFI_ENDPROC
16603-END(kernel_execve)
16604+ENDPROC(kernel_execve)
16605
16606 /* Call softirq on interrupt stack. Interrupts are off. */
16607 ENTRY(call_softirq)
5e856224 16608@@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
6e9df6a3
MT
16609 CFI_DEF_CFA_REGISTER rsp
16610 CFI_ADJUST_CFA_OFFSET -8
16611 decl PER_CPU_VAR(irq_count)
16612+ pax_force_retaddr
16613 ret
16614 CFI_ENDPROC
16615-END(call_softirq)
16616+ENDPROC(call_softirq)
16617
16618 #ifdef CONFIG_XEN
16619 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
5e856224 16620@@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
6e9df6a3
MT
16621 decl PER_CPU_VAR(irq_count)
16622 jmp error_exit
16623 CFI_ENDPROC
16624-END(xen_do_hypervisor_callback)
16625+ENDPROC(xen_do_hypervisor_callback)
16626
16627 /*
16628 * Hypervisor uses this for application faults while it executes.
5e856224 16629@@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
6e9df6a3
MT
16630 SAVE_ALL
16631 jmp error_exit
16632 CFI_ENDPROC
16633-END(xen_failsafe_callback)
16634+ENDPROC(xen_failsafe_callback)
16635
16636 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16637 xen_hvm_callback_vector xen_evtchn_do_upcall
5e856224 16638@@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
df50ba0c
MT
16639 TRACE_IRQS_OFF
16640 testl %ebx,%ebx /* swapgs needed? */
16641 jnz paranoid_restore
16642- testl $3,CS(%rsp)
16643+ testb $3,CS(%rsp)
ae4e228f 16644 jnz paranoid_userspace
df50ba0c 16645+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 16646+ pax_exit_kernel
df50ba0c
MT
16647+ TRACE_IRQS_IRETQ 0
16648+ SWAPGS_UNSAFE_STACK
16649+ RESTORE_ALL 8
fe2de317 16650+ pax_force_retaddr_bts
df50ba0c
MT
16651+ jmp irq_return
16652+#endif
ae4e228f 16653 paranoid_swapgs:
df50ba0c 16654+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 16655+ pax_exit_kernel_user
df50ba0c 16656+#else
317566c1 16657+ pax_exit_kernel
df50ba0c 16658+#endif
ae4e228f
MT
16659 TRACE_IRQS_IRETQ 0
16660 SWAPGS_UNSAFE_STACK
16661 RESTORE_ALL 8
16662 jmp irq_return
16663 paranoid_restore:
317566c1 16664+ pax_exit_kernel
ae4e228f
MT
16665 TRACE_IRQS_IRETQ 0
16666 RESTORE_ALL 8
fe2de317 16667+ pax_force_retaddr_bts
ae4e228f 16668 jmp irq_return
15a11c5b
MT
16669 paranoid_userspace:
16670 GET_THREAD_INFO(%rcx)
5e856224 16671@@ -1399,7 +1787,7 @@ paranoid_schedule:
6e9df6a3
MT
16672 TRACE_IRQS_OFF
16673 jmp paranoid_userspace
16674 CFI_ENDPROC
16675-END(paranoid_exit)
16676+ENDPROC(paranoid_exit)
16677
16678 /*
16679 * Exception entry point. This expects an error code/orig_rax on the stack.
5e856224 16680@@ -1426,12 +1814,13 @@ ENTRY(error_entry)
df50ba0c
MT
16681 movq_cfi r14, R14+8
16682 movq_cfi r15, R15+8
16683 xorl %ebx,%ebx
16684- testl $3,CS+8(%rsp)
16685+ testb $3,CS+8(%rsp)
16686 je error_kernelspace
16687 error_swapgs:
16688 SWAPGS
6e9df6a3
MT
16689 error_sti:
16690 TRACE_IRQS_OFF
fe2de317 16691+ pax_force_retaddr_bts
6e9df6a3
MT
16692 ret
16693
16694 /*
5e856224 16695@@ -1458,7 +1847,7 @@ bstep_iret:
6e9df6a3
MT
16696 movq %rcx,RIP+8(%rsp)
16697 jmp error_swapgs
16698 CFI_ENDPROC
16699-END(error_entry)
16700+ENDPROC(error_entry)
16701
16702
16703 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
5e856224 16704@@ -1478,7 +1867,7 @@ ENTRY(error_exit)
6e9df6a3
MT
16705 jnz retint_careful
16706 jmp retint_swapgs
16707 CFI_ENDPROC
16708-END(error_exit)
16709+ENDPROC(error_exit)
16710
5e856224
MT
16711 /*
16712 * Test if a given stack is an NMI stack or not.
16713@@ -1535,9 +1924,11 @@ ENTRY(nmi)
16714 * If %cs was not the kernel segment, then the NMI triggered in user
16715 * space, which means it is definitely not nested.
16716 */
16717+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16718+ je 1f
16719 cmpl $__KERNEL_CS, 16(%rsp)
16720 jne first_nmi
16721-
16722+1:
16723 /*
16724 * Check the special variable on the stack to see if NMIs are
16725 * executing.
16726@@ -1659,6 +2050,16 @@ restart_nmi:
16727 */
ae4e228f
MT
16728 call save_paranoid
16729 DEFAULT_FRAME 0
df50ba0c
MT
16730+#ifdef CONFIG_PAX_MEMORY_UDEREF
16731+ testb $3, CS(%rsp)
16732+ jnz 1f
317566c1 16733+ pax_enter_kernel
df50ba0c 16734+ jmp 2f
317566c1 16735+1: pax_enter_kernel_user
df50ba0c
MT
16736+2:
16737+#else
317566c1 16738+ pax_enter_kernel
df50ba0c 16739+#endif
ae4e228f
MT
16740 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16741 movq %rsp,%rdi
16742 movq $-1,%rsi
5e856224 16743@@ -1666,14 +2067,25 @@ restart_nmi:
df50ba0c
MT
16744 testl %ebx,%ebx /* swapgs needed? */
16745 jnz nmi_restore
ae4e228f 16746 nmi_swapgs:
317566c1
MT
16747+#ifdef CONFIG_PAX_MEMORY_UDEREF
16748+ pax_exit_kernel_user
16749+#else
16750+ pax_exit_kernel
16751+#endif
ae4e228f 16752 SWAPGS_UNSAFE_STACK
317566c1 16753+ RESTORE_ALL 8
5e856224
MT
16754+ /* Clear the NMI executing stack variable */
16755+ movq $0, 10*8(%rsp)
317566c1 16756+ jmp irq_return
ae4e228f 16757 nmi_restore:
317566c1 16758+ pax_exit_kernel
ae4e228f 16759 RESTORE_ALL 8
fe2de317 16760+ pax_force_retaddr_bts
5e856224
MT
16761 /* Clear the NMI executing stack variable */
16762 movq $0, 10*8(%rsp)
ae4e228f 16763 jmp irq_return
6e9df6a3 16764 CFI_ENDPROC
6e9df6a3
MT
16765-END(nmi)
16766+ENDPROC(nmi)
16767
5e856224
MT
16768 /*
16769 * If an NMI hit an iret because of an exception or breakpoint,
16770@@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
6e9df6a3
MT
16771 mov $-ENOSYS,%eax
16772 sysret
16773 CFI_ENDPROC
16774-END(ignore_sysret)
16775+ENDPROC(ignore_sysret)
16776
16777 /*
16778 * End of kprobes section
fe2de317
MT
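
pax_erase_kstack, defined in the entry_64.S hunk above for CONFIG_PAX_MEMORY_STACKLEAK, walks the kernel stack from the remembered lowest point, uses the -0xBEEF poison pattern to find how deep this syscall actually went, and then rewrites the now-unused region with that poison again so stale data from one syscall cannot later leak out through uninitialized stack reads. A rough, userspace-only C model of the idea; array indices stand in for stack addresses, and the scan is deliberately simpler than the repne/repe scasq loops in the patch.

    #include <stdint.h>
    #include <stdio.h>

    #define STACK_QWORDS 64
    #define POISON ((uint64_t)-0xBEEFLL)   /* same sentinel value the patch stores */

    static uint64_t stack[STACK_QWORDS];   /* toy stack; small index = deeper */

    /* Find the deepest slot that lost its poison below the current "stack
     * pointer", then re-poison everything from there up to it. */
    static size_t erase_kstack(size_t lowest_ever, size_t current_sp)
    {
        size_t deepest = lowest_ever;

        while (deepest < current_sp && stack[deepest] == POISON)
            deepest++;                     /* slots still poisoned were never written */

        for (size_t i = deepest; i < current_sp; i++)
            stack[i] = POISON;             /* mirrors the rep stosq refill */

        return deepest;                    /* remembered as the new low-water mark */
    }

    int main(void)
    {
        for (size_t i = 0; i < STACK_QWORDS; i++)
            stack[i] = POISON;
        for (size_t i = 20; i < STACK_QWORDS; i++)   /* pretend a deep call chain dirtied 20..63 */
            stack[i] = i;

        size_t mark = erase_kstack(0, 50);           /* ...and then unwound to slot 50 */
        printf("new low-water mark: %zu, slot 30 now %#llx\n",
               mark, (unsigned long long)stack[30]);
        return 0;
    }
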
16779diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16780index c9a281f..ce2f317 100644
16781--- a/arch/x86/kernel/ftrace.c
16782+++ b/arch/x86/kernel/ftrace.c
16783@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15a11c5b 16784 static const void *mod_code_newcode; /* holds the text to write to the IP */
8308f9c9
MT
16785
16786 static unsigned nmi_wait_count;
16787-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16788+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16789
16790 int ftrace_arch_read_dyn_info(char *buf, int size)
16791 {
fe2de317 16792@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
8308f9c9
MT
16793
16794 r = snprintf(buf, size, "%u %u",
16795 nmi_wait_count,
16796- atomic_read(&nmi_update_count));
16797+ atomic_read_unchecked(&nmi_update_count));
16798 return r;
16799 }
16800
16801@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
df50ba0c 16802
ae4e228f
MT
16803 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16804 smp_rmb();
16805+ pax_open_kernel();
16806 ftrace_mod_code();
8308f9c9 16807- atomic_inc(&nmi_update_count);
ae4e228f 16808+ pax_close_kernel();
8308f9c9 16809+ atomic_inc_unchecked(&nmi_update_count);
ae4e228f
MT
16810 }
16811 /* Must have previous changes seen before executions */
8308f9c9 16812 smp_mb();
fe2de317 16813@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
ae4e228f
MT
16814 {
16815 unsigned char replaced[MCOUNT_INSN_SIZE];
16816
16817+ ip = ktla_ktva(ip);
16818+
16819 /*
16820 * Note: Due to modules and __init, code can
16821 * disappear and change, we need to protect against faulting
fe2de317 16822@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
58c5fc13
MT
16823 unsigned char old[MCOUNT_INSN_SIZE], *new;
16824 int ret;
16825
16826- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16827+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16828 new = ftrace_call_replace(ip, (unsigned long)func);
ae4e228f
MT
16829 ret = ftrace_modify_code(ip, old, new);
16830
fe2de317 16831@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
ae4e228f
MT
16832 {
16833 unsigned char code[MCOUNT_INSN_SIZE];
16834
16835+ ip = ktla_ktva(ip);
16836+
16837 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16838 return -EFAULT;
16839
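
The ftrace.c hunks above bracket ftrace_mod_code() with pax_open_kernel()/pax_close_kernel(): write protection on the kernel text is lifted only for the moment the mcount call site is patched and restored immediately afterwards. As a loose userspace analogue of that bracketing pattern (the kernel side flips the KERNEXEC write-protection state rather than calling mprotect, and everything in this sketch is illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        uint8_t *text = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (text == MAP_FAILED)
            return 1;

        memset(text, 0x90, 16);                    /* pretend these bytes are NOPs */
        mprotect(text, pagesz, PROT_READ);         /* "text" is now read-only */

        /* pax_open_kernel(): allow writes for the duration of the patch */
        mprotect(text, pagesz, PROT_READ | PROT_WRITE);
        text[0] = 0xe8;                            /* e.g. turn a NOP into a CALL opcode */
        /* pax_close_kernel(): back to read-only */
        mprotect(text, pagesz, PROT_READ);

        printf("patched first byte: %#x\n", text[0]);
        munmap(text, pagesz);
        return 0;
    }
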
fe2de317 16840diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
5e856224 16841index 51ff186..9e77418 100644
fe2de317
MT
16842--- a/arch/x86/kernel/head32.c
16843+++ b/arch/x86/kernel/head32.c
bc901d79 16844@@ -19,6 +19,7 @@
ae4e228f 16845 #include <asm/io_apic.h>
58c5fc13 16846 #include <asm/bios_ebda.h>
bc901d79 16847 #include <asm/tlbflush.h>
58c5fc13
MT
16848+#include <asm/boot.h>
16849
ae4e228f 16850 static void __init i386_default_early_setup(void)
58c5fc13 16851 {
5e856224 16852@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
58c5fc13 16853
5e856224
MT
16854 void __init i386_start_kernel(void)
16855 {
16856- memblock_reserve(__pa_symbol(&_text),
16857- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16858+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
58c5fc13
MT
16859
16860 #ifdef CONFIG_BLK_DEV_INITRD
16861 /* Reserve INITRD */
fe2de317
MT
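
The head32.c hunk above changes i386_start_kernel() to reserve memory starting at LOAD_PHYSICAL_ADDR rather than at __pa_symbol(&_text), which appears intended to keep the whole range from the compile-time load address up to __bss_stop reserved even when the PaX layout moves _text away from the raw load address. A tiny sketch of the arithmetic with entirely made-up addresses; only the memblock_reserve(base, size) shape is taken from the patch.

    #include <stdio.h>

    /* All numbers below are invented purely for illustration. */
    #define LOAD_PHYSICAL_ADDR 0x1000000UL   /* compile-time load address */
    #define PA_TEXT            0x1400000UL   /* hypothetical shifted _text */
    #define PA_BSS_STOP        0x2000000UL

    int main(void)
    {
        printf("old reservation (_text .. __bss_stop):        %lu bytes\n",
               PA_BSS_STOP - PA_TEXT);
        printf("new reservation (load addr .. __bss_stop):    %lu bytes\n",
               PA_BSS_STOP - LOAD_PHYSICAL_ADDR);
        return 0;
    }
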
16862diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16863index ce0be7c..c41476e 100644
16864--- a/arch/x86/kernel/head_32.S
16865+++ b/arch/x86/kernel/head_32.S
df50ba0c 16866@@ -25,6 +25,12 @@
58c5fc13
MT
16867 /* Physical address */
16868 #define pa(X) ((X) - __PAGE_OFFSET)
ae4e228f
MT
16869
16870+#ifdef CONFIG_PAX_KERNEXEC
16871+#define ta(X) (X)
16872+#else
16873+#define ta(X) ((X) - __PAGE_OFFSET)
16874+#endif
16875+
16876 /*
16877 * References to members of the new_cpu_data structure.
16878 */
df50ba0c 16879@@ -54,11 +60,7 @@
58c5fc13
MT
16880 * and small than max_low_pfn, otherwise will waste some page table entries
16881 */
16882
16883-#if PTRS_PER_PMD > 1
16884-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16885-#else
16886-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16887-#endif
16888+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16889
bc901d79
MT
16890 /* Number of possible pages in the lowmem region */
16891 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
fe2de317 16892@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
58c5fc13
MT
16893 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16894
16895 /*
16896+ * Real beginning of normal "text" segment
16897+ */
16898+ENTRY(stext)
16899+ENTRY(_stext)
16900+
16901+/*
16902 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16903 * %esi points to the real-mode code as a 32-bit pointer.
16904 * CS and DS must be 4 GB flat segments, but we don't depend on
bc901d79 16905@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
58c5fc13
MT
16906 * can.
16907 */
ae4e228f 16908 __HEAD
58c5fc13
MT
16909+
16910+#ifdef CONFIG_PAX_KERNEXEC
16911+ jmp startup_32
16912+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16913+.fill PAGE_SIZE-5,1,0xcc
16914+#endif
16915+
16916 ENTRY(startup_32)
16454cff
MT
16917 movl pa(stack_start),%ecx
16918
16919@@ -105,6 +120,57 @@ ENTRY(startup_32)
58c5fc13 16920 2:
16454cff 16921 leal -__PAGE_OFFSET(%ecx),%esp
58c5fc13
MT
16922
16923+#ifdef CONFIG_SMP
16924+ movl $pa(cpu_gdt_table),%edi
16925+ movl $__per_cpu_load,%eax
16926+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16927+ rorl $16,%eax
16928+ movb %al,__KERNEL_PERCPU + 4(%edi)
16929+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16930+ movl $__per_cpu_end - 1,%eax
ae4e228f 16931+ subl $__per_cpu_start,%eax
58c5fc13
MT
16932+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16933+#endif
16934+
16935+#ifdef CONFIG_PAX_MEMORY_UDEREF
16936+ movl $NR_CPUS,%ecx
16937+ movl $pa(cpu_gdt_table),%edi
16938+1:
16939+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
bc901d79
MT
16940+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16941+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
58c5fc13
MT
16942+ addl $PAGE_SIZE_asm,%edi
16943+ loop 1b
16944+#endif
16945+
16946+#ifdef CONFIG_PAX_KERNEXEC
16947+ movl $pa(boot_gdt),%edi
ae4e228f 16948+ movl $__LOAD_PHYSICAL_ADDR,%eax
58c5fc13
MT
16949+ movw %ax,__BOOT_CS + 2(%edi)
16950+ rorl $16,%eax
16951+ movb %al,__BOOT_CS + 4(%edi)
16952+ movb %ah,__BOOT_CS + 7(%edi)
16953+ rorl $16,%eax
16954+
ae4e228f
MT
16955+ ljmp $(__BOOT_CS),$1f
16956+1:
16957+
58c5fc13
MT
16958+ movl $NR_CPUS,%ecx
16959+ movl $pa(cpu_gdt_table),%edi
ae4e228f 16960+ addl $__PAGE_OFFSET,%eax
58c5fc13
MT
16961+1:
16962+ movw %ax,__KERNEL_CS + 2(%edi)
ae4e228f 16963+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
58c5fc13
MT
16964+ rorl $16,%eax
16965+ movb %al,__KERNEL_CS + 4(%edi)
ae4e228f 16966+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
58c5fc13 16967+ movb %ah,__KERNEL_CS + 7(%edi)
ae4e228f 16968+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
58c5fc13
MT
16969+ rorl $16,%eax
16970+ addl $PAGE_SIZE_asm,%edi
16971+ loop 1b
16972+#endif
16973+
16974 /*
16975 * Clear BSS first so that there are no surprises...
16976 */
16454cff 16977@@ -195,8 +261,11 @@ ENTRY(startup_32)
58c5fc13
MT
16978 movl %eax, pa(max_pfn_mapped)
16979
16980 /* Do early initialization of the fixmap area */
bc901d79
MT
16981- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16982- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 16983+#ifdef CONFIG_COMPAT_VDSO
bc901d79 16984+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 16985+#else
bc901d79 16986+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13
MT
16987+#endif
16988 #else /* Not PAE */
16989
16990 page_pde_offset = (__PAGE_OFFSET >> 20);
16454cff 16991@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
58c5fc13
MT
16992 movl %eax, pa(max_pfn_mapped)
16993
16994 /* Do early initialization of the fixmap area */
bc901d79
MT
16995- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16996- movl %eax,pa(initial_page_table+0xffc)
58c5fc13 16997+#ifdef CONFIG_COMPAT_VDSO
bc901d79 16998+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
58c5fc13 16999+#else
bc901d79 17000+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
58c5fc13
MT
17001+#endif
17002 #endif
16454cff
MT
17003
17004 #ifdef CONFIG_PARAVIRT
17005@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17006 cmpl $num_subarch_entries, %eax
17007 jae bad_subarch
17008
17009- movl pa(subarch_entries)(,%eax,4), %eax
17010- subl $__PAGE_OFFSET, %eax
17011- jmp *%eax
17012+ jmp *pa(subarch_entries)(,%eax,4)
17013
17014 bad_subarch:
17015 WEAK(lguest_entry)
17016@@ -255,10 +325,10 @@ WEAK(xen_entry)
17017 __INITDATA
17018
17019 subarch_entries:
17020- .long default_entry /* normal x86/PC */
17021- .long lguest_entry /* lguest hypervisor */
17022- .long xen_entry /* Xen hypervisor */
17023- .long default_entry /* Moorestown MID */
66a7e928
MT
17024+ .long ta(default_entry) /* normal x86/PC */
17025+ .long ta(lguest_entry) /* lguest hypervisor */
17026+ .long ta(xen_entry) /* Xen hypervisor */
17027+ .long ta(default_entry) /* Moorestown MID */
16454cff
MT
17028 num_subarch_entries = (. - subarch_entries) / 4
17029 .previous
17030 #else
17031@@ -312,6 +382,7 @@ default_entry:
58c5fc13
MT
17032 orl %edx,%eax
17033 movl %eax,%cr4
17034
17035+#ifdef CONFIG_X86_PAE
ae4e228f
MT
17036 testb $X86_CR4_PAE, %al # check if PAE is enabled
17037 jz 6f
58c5fc13 17038
16454cff 17039@@ -340,6 +411,9 @@ default_entry:
58c5fc13
MT
17040 /* Make changes effective */
17041 wrmsr
17042
17043+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
58c5fc13 17044+#endif
ae4e228f 17045+
58c5fc13
MT
17046 6:
17047
17048 /*
16454cff 17049@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
17050 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17051 movl %eax,%ss # after changing gdt.
17052
17053- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17054+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17055 movl %eax,%ds
17056 movl %eax,%es
17057
16454cff 17058@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
17059 */
17060 cmpb $0,ready
17061 jne 1f
df50ba0c 17062- movl $gdt_page,%eax
58c5fc13 17063+ movl $cpu_gdt_table,%eax
df50ba0c 17064 movl $stack_canary,%ecx
58c5fc13
MT
17065+#ifdef CONFIG_SMP
17066+ addl $__per_cpu_load,%ecx
17067+#endif
17068 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17069 shrl $16, %ecx
17070 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
bc901d79
MT
17071 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17072 1:
17073-#endif
17074 movl $(__KERNEL_STACK_CANARY),%eax
17075+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17076+ movl $(__USER_DS),%eax
17077+#else
17078+ xorl %eax,%eax
17079+#endif
17080 movl %eax,%gs
17081
17082 xorl %eax,%eax # Clear LDT
16454cff 17083@@ -558,22 +639,22 @@ early_page_fault:
58c5fc13
MT
17084 jmp early_fault
17085
17086 early_fault:
17087- cld
17088 #ifdef CONFIG_PRINTK
17089+ cmpl $1,%ss:early_recursion_flag
17090+ je hlt_loop
17091+ incl %ss:early_recursion_flag
17092+ cld
17093 pusha
17094 movl $(__KERNEL_DS),%eax
17095 movl %eax,%ds
17096 movl %eax,%es
17097- cmpl $2,early_recursion_flag
17098- je hlt_loop
17099- incl early_recursion_flag
17100 movl %cr2,%eax
17101 pushl %eax
17102 pushl %edx /* trapno */
17103 pushl $fault_msg
17104 call printk
17105+; call dump_stack
17106 #endif
17107- call dump_stack
17108 hlt_loop:
17109 hlt
17110 jmp hlt_loop
16454cff 17111@@ -581,8 +662,11 @@ hlt_loop:
58c5fc13
MT
17112 /* This is the default interrupt "handler" :-) */
17113 ALIGN
17114 ignore_int:
17115- cld
17116 #ifdef CONFIG_PRINTK
17117+ cmpl $2,%ss:early_recursion_flag
17118+ je hlt_loop
17119+ incl %ss:early_recursion_flag
17120+ cld
17121 pushl %eax
17122 pushl %ecx
17123 pushl %edx
16454cff 17124@@ -591,9 +675,6 @@ ignore_int:
58c5fc13
MT
17125 movl $(__KERNEL_DS),%eax
17126 movl %eax,%ds
17127 movl %eax,%es
17128- cmpl $2,early_recursion_flag
17129- je hlt_loop
17130- incl early_recursion_flag
17131 pushl 16(%esp)
17132 pushl 24(%esp)
17133 pushl 32(%esp)
16454cff 17134@@ -622,29 +703,43 @@ ENTRY(initial_code)
58c5fc13
MT
17135 /*
17136 * BSS section
17137 */
ae4e228f 17138-__PAGE_ALIGNED_BSS
66a7e928 17139- .align PAGE_SIZE
58c5fc13 17140 #ifdef CONFIG_X86_PAE
bc901d79 17141+.section .initial_pg_pmd,"a",@progbits
16454cff 17142 initial_pg_pmd:
58c5fc13
MT
17143 .fill 1024*KPMDS,4,0
17144 #else
c52201e0 17145+.section .initial_page_table,"a",@progbits
bc901d79 17146 ENTRY(initial_page_table)
58c5fc13
MT
17147 .fill 1024,4,0
17148 #endif
bc901d79 17149+.section .initial_pg_fixmap,"a",@progbits
16454cff 17150 initial_pg_fixmap:
58c5fc13 17151 .fill 1024,4,0
bc901d79
MT
17152+.section .empty_zero_page,"a",@progbits
17153 ENTRY(empty_zero_page)
17154 .fill 4096,1,0
17155+.section .swapper_pg_dir,"a",@progbits
17156 ENTRY(swapper_pg_dir)
6892158b
MT
17157+#ifdef CONFIG_X86_PAE
17158+ .fill 4,8,0
17159+#else
17160 .fill 1024,4,0
6892158b 17161+#endif
58c5fc13 17162+
bc901d79 17163+/*
58c5fc13
MT
17164+ * The IDT has to be page-aligned to simplify the Pentium
17165+ * F0 0F bug workaround.. We have a special link segment
17166+ * for this.
17167+ */
17168+.section .idt,"a",@progbits
17169+ENTRY(idt_table)
17170+ .fill 256,8,0
bc901d79
MT
17171
17172 /*
58c5fc13
MT
17173 * This starts the data section.
17174 */
17175 #ifdef CONFIG_X86_PAE
ae4e228f 17176-__PAGE_ALIGNED_DATA
58c5fc13 17177- /* Page-aligned for the benefit of paravirt? */
66a7e928 17178- .align PAGE_SIZE
bc901d79
MT
17179+.section .initial_page_table,"a",@progbits
17180 ENTRY(initial_page_table)
17181 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
58c5fc13 17182 # if KPMDS == 3
71d190be 17183@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
df50ba0c
MT
17184 # error "Kernel PMDs should be 1, 2 or 3"
17185 # endif
66a7e928 17186 .align PAGE_SIZE /* needs to be page-sized too */
df50ba0c
MT
17187+
17188+#ifdef CONFIG_PAX_PER_CPU_PGD
17189+ENTRY(cpu_pgd)
17190+ .rept NR_CPUS
17191+ .fill 4,8,0
17192+ .endr
17193+#endif
17194+
17195 #endif
58c5fc13
MT
17196
17197 .data
16454cff 17198 .balign 4
58c5fc13
MT
17199 ENTRY(stack_start)
17200- .long init_thread_union+THREAD_SIZE
17201+ .long init_thread_union+THREAD_SIZE-8
58c5fc13 17202
fe2de317
MT
17203+ready: .byte 0
17204+
58c5fc13
MT
17205+.section .rodata,"a",@progbits
17206 early_recursion_flag:
17207 .long 0
17208
71d190be
MT
17209-ready: .byte 0
17210-
17211 int_msg:
17212 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17213
16454cff 17214@@ -707,7 +811,7 @@ fault_msg:
58c5fc13
MT
17215 .word 0 # 32 bit align gdt_desc.address
17216 boot_gdt_descr:
17217 .word __BOOT_DS+7
17218- .long boot_gdt - __PAGE_OFFSET
17219+ .long pa(boot_gdt)
17220
17221 .word 0 # 32-bit align idt_desc.address
17222 idt_descr:
16454cff 17223@@ -718,7 +822,7 @@ idt_descr:
58c5fc13
MT
17224 .word 0 # 32 bit align gdt_desc.address
17225 ENTRY(early_gdt_descr)
17226 .word GDT_ENTRIES*8-1
df50ba0c 17227- .long gdt_page /* Overwritten for secondary CPUs */
58c5fc13
MT
17228+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17229
17230 /*
17231 * The boot_gdt must mirror the equivalent in setup.S and is
16454cff 17232@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
58c5fc13
MT
17233 .align L1_CACHE_BYTES
17234 ENTRY(boot_gdt)
17235 .fill GDT_ENTRY_BOOT_CS,8,0
17236- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17237- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17238+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17239+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17240+
17241+ .align PAGE_SIZE_asm
17242+ENTRY(cpu_gdt_table)
17243+ .rept NR_CPUS
17244+ .quad 0x0000000000000000 /* NULL descriptor */
17245+ .quad 0x0000000000000000 /* 0x0b reserved */
17246+ .quad 0x0000000000000000 /* 0x13 reserved */
17247+ .quad 0x0000000000000000 /* 0x1b reserved */
ae4e228f
MT
17248+
17249+#ifdef CONFIG_PAX_KERNEXEC
17250+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17251+#else
58c5fc13 17252+ .quad 0x0000000000000000 /* 0x20 unused */
ae4e228f
MT
17253+#endif
17254+
58c5fc13
MT
17255+ .quad 0x0000000000000000 /* 0x28 unused */
17256+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17257+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17258+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17259+ .quad 0x0000000000000000 /* 0x4b reserved */
17260+ .quad 0x0000000000000000 /* 0x53 reserved */
17261+ .quad 0x0000000000000000 /* 0x5b reserved */
17262+
17263+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17264+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17265+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17266+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17267+
17268+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17269+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17270+
17271+ /*
17272+ * Segments used for calling PnP BIOS have byte granularity.
17273+ * The code segments and data segments have fixed 64k limits,
17274+ * the transfer segment sizes are set at run time.
17275+ */
17276+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17277+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17278+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17279+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17280+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17281+
17282+ /*
17283+ * The APM segments have byte granularity and their bases
17284+ * are set at run time. All have 64k limits.
17285+ */
17286+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17287+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17288+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17289+
17290+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17291+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15a11c5b 17292+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
58c5fc13
MT
17293+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17294+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17295+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17296+
17297+ /* Be sure this is zeroed to avoid false validations in Xen */
17298+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17299+ .endr
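
The cpu_gdt_table added above spells its segments out as raw 8-byte descriptors, and the boot_gdt entries change from 0x00cf9a.../0x00cf92... to 0x00cf9b.../0x00cf93..., i.e. the accessed bit is pre-set, presumably so the CPU never needs to write that bit into a GDT page that PaX keeps read-only. A small C decoder for the standard x86 descriptor layout, handy for checking such constants by eye; the two sample values are copied from the hunk above, the rest is a plain unpacking exercise.

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack one 8-byte GDT descriptor the way the CPU does. */
    static void decode(uint64_t d)
    {
        uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) << 24);
        uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)(((d >> 48) & 0xf) << 16);
        uint8_t  access = (uint8_t)(d >> 40);
        int granular_4k = (int)((d >> 55) & 1);

        if (granular_4k)
            limit = (limit << 12) | 0xfff;   /* limit counted in 4K pages */

        printf("base=%#010x limit=%#010x access=%#04x accessed=%d\n",
               base, limit, access, access & 1);
    }

    int main(void)
    {
        decode(0x00cf9a000000ffffULL);   /* stock kernel 4GB code segment */
        decode(0x00cf9b000000ffffULL);   /* patched: accessed bit pre-set */
        return 0;
    }
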
fe2de317 17300diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
5e856224 17301index 40f4eb3..6d24d9d 100644
fe2de317
MT
17302--- a/arch/x86/kernel/head_64.S
17303+++ b/arch/x86/kernel/head_64.S
17304@@ -19,6 +19,8 @@
ae4e228f
MT
17305 #include <asm/cache.h>
17306 #include <asm/processor-flags.h>
17307 #include <asm/percpu.h>
17308+#include <asm/cpufeature.h>
fe2de317 17309+#include <asm/alternative-asm.h>
ae4e228f
MT
17310
17311 #ifdef CONFIG_PARAVIRT
17312 #include <asm/asm-offsets.h>
fe2de317 17313@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
58c5fc13
MT
17314 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17315 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17316 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17317+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17318+L3_VMALLOC_START = pud_index(VMALLOC_START)
fe2de317
MT
17319+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17320+L3_VMALLOC_END = pud_index(VMALLOC_END)
58c5fc13
MT
17321+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17322+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17323
17324 .text
ae4e228f 17325 __HEAD
fe2de317 17326@@ -85,35 +93,23 @@ startup_64:
58c5fc13
MT
17327 */
17328 addq %rbp, init_level4_pgt + 0(%rip)
17329 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17330+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
fe2de317 17331+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
58c5fc13
MT
17332+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17333 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17334
17335 addq %rbp, level3_ident_pgt + 0(%rip)
ae4e228f 17336+#ifndef CONFIG_XEN
58c5fc13 17337+ addq %rbp, level3_ident_pgt + 8(%rip)
ae4e228f 17338+#endif
58c5fc13
MT
17339
17340- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17341- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17342+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
fe2de317 17343+
58c5fc13
MT
17344+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17345+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17346
fe2de317
MT
17347 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17348-
58c5fc13
MT
17349- /* Add an Identity mapping if I am above 1G */
17350- leaq _text(%rip), %rdi
17351- andq $PMD_PAGE_MASK, %rdi
17352-
17353- movq %rdi, %rax
17354- shrq $PUD_SHIFT, %rax
17355- andq $(PTRS_PER_PUD - 1), %rax
17356- jz ident_complete
17357-
17358- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17359- leaq level3_ident_pgt(%rip), %rbx
17360- movq %rdx, 0(%rbx, %rax, 8)
17361-
17362- movq %rdi, %rax
17363- shrq $PMD_SHIFT, %rax
17364- andq $(PTRS_PER_PMD - 1), %rax
17365- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17366- leaq level2_spare_pgt(%rip), %rbx
17367- movq %rdx, 0(%rbx, %rax, 8)
17368-ident_complete:
58c5fc13
MT
17369+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17370
17371 /*
17372 * Fixup the kernel text+data virtual addresses. Note that
fe2de317 17373@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
df50ba0c
MT
17374 * after the boot processor executes this code.
17375 */
17376
17377- /* Enable PAE mode and PGE */
17378- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17379+ /* Enable PAE mode and PSE/PGE */
17380+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17381 movq %rax, %cr4
17382
17383 /* Setup early boot stage 4 level pagetables. */
fe2de317 17384@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17385 movl $MSR_EFER, %ecx
17386 rdmsr
17387 btsl $_EFER_SCE, %eax /* Enable System Call */
17388- btl $20,%edi /* No Execute supported? */
17389+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17390 jnc 1f
17391 btsl $_EFER_NX, %eax
17392+ leaq init_level4_pgt(%rip), %rdi
fe2de317 17393+#ifndef CONFIG_EFI
58c5fc13 17394+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
fe2de317 17395+#endif
58c5fc13 17396+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
fe2de317 17397+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
58c5fc13 17398+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
ae4e228f 17399+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17400 1: wrmsr /* Make changes effective */
17401
17402 /* Setup cr0 */
17403@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17404 * jump. In addition we need to ensure %cs is set so we make this
17405 * a far return.
17406 */
17407+ pax_set_fptr_mask
17408 movq initial_code(%rip),%rax
17409 pushq $0 # fake return address to stop unwinder
17410 pushq $__KERNEL_CS # set correct cs
17411@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17412 bad_address:
17413 jmp bad_address
17414
17415- .section ".init.text","ax"
17416+ __INIT
17417 #ifdef CONFIG_EARLY_PRINTK
17418 .globl early_idt_handlers
17419 early_idt_handlers:
fe2de317 17420@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17421 #endif /* EARLY_PRINTK */
17422 1: hlt
17423 jmp 1b
17424+ .previous
17425
17426 #ifdef CONFIG_EARLY_PRINTK
17427+ __INITDATA
17428 early_recursion_flag:
17429 .long 0
17430+ .previous
17431
17432+ .section .rodata,"a",@progbits
17433 early_idt_msg:
17434 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17435 early_idt_ripmsg:
17436 .asciz "RIP %s\n"
17437+ .previous
17438 #endif /* CONFIG_EARLY_PRINTK */
17439- .previous
17440
17441+ .section .rodata,"a",@progbits
17442 #define NEXT_PAGE(name) \
17443 .balign PAGE_SIZE; \
17444 ENTRY(name)
fe2de317 17445@@ -338,7 +348,6 @@ ENTRY(name)
17446 i = i + 1 ; \
17447 .endr
17448
17449- .data
17450 /*
17451 * This default setting generates an ident mapping at address 0x100000
17452 * and a mapping for the kernel that precisely maps virtual address
fe2de317 17453@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17454 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17455 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17456 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17457+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17458+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17459+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17460+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17461+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17462+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17463 .org init_level4_pgt + L4_START_KERNEL*8, 0
17464 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17465 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17466
17467+#ifdef CONFIG_PAX_PER_CPU_PGD
17468+NEXT_PAGE(cpu_pgd)
17469+ .rept NR_CPUS
17470+ .fill 512,8,0
17471+ .endr
17472+#endif
17473+
17474 NEXT_PAGE(level3_ident_pgt)
17475 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17476+#ifdef CONFIG_XEN
17477 .fill 511,8,0
17478+#else
17479+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
ae4e228f 17480+ .fill 510,8,0
17481+#endif
17482+
17483+NEXT_PAGE(level3_vmalloc_start_pgt)
17484+ .fill 512,8,0
17485+
17486+NEXT_PAGE(level3_vmalloc_end_pgt)
17487+ .fill 512,8,0
17488+
17489+NEXT_PAGE(level3_vmemmap_pgt)
17490+ .fill L3_VMEMMAP_START,8,0
17491+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17492
17493 NEXT_PAGE(level3_kernel_pgt)
17494 .fill L3_START_KERNEL,8,0
fe2de317 17495@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17496 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17497 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17498
17499+NEXT_PAGE(level2_vmemmap_pgt)
17500+ .fill 512,8,0
17501+
17502 NEXT_PAGE(level2_fixmap_pgt)
17503- .fill 506,8,0
17504- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17505- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17506- .fill 5,8,0
17507+ .fill 507,8,0
17508+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17509+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17510+ .fill 4,8,0
17511
17512-NEXT_PAGE(level1_fixmap_pgt)
17513+NEXT_PAGE(level1_vsyscall_pgt)
17514 .fill 512,8,0
17515
17516-NEXT_PAGE(level2_ident_pgt)
17517- /* Since I easily can, map the first 1G.
ae4e228f 17518+ /* Since I easily can, map the first 2G.
17519 * Don't set NX because code runs from these pages.
17520 */
17521- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17522+NEXT_PAGE(level2_ident_pgt)
ae4e228f 17523+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17524
17525 NEXT_PAGE(level2_kernel_pgt)
17526 /*
5e856224 17527@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17528 * If you want to increase this then increase MODULES_VADDR
17529 * too.)
17530 */
17531- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17532- KERNEL_IMAGE_SIZE/PMD_SIZE)
17533-
17534-NEXT_PAGE(level2_spare_pgt)
17535- .fill 512, 8, 0
17536+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17537
17538 #undef PMDS
17539 #undef NEXT_PAGE
17540
17541- .data
17542+ .align PAGE_SIZE
17543+ENTRY(cpu_gdt_table)
17544+ .rept NR_CPUS
17545+ .quad 0x0000000000000000 /* NULL descriptor */
17546+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17547+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17548+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17549+ .quad 0x00cffb000000ffff /* __USER32_CS */
17550+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17551+ .quad 0x00affb000000ffff /* __USER_CS */
17552+
17553+#ifdef CONFIG_PAX_KERNEXEC
17554+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17555+#else
58c5fc13 17556+ .quad 0x0 /* unused */
17557+#endif
17558+
17559+ .quad 0,0 /* TSS */
17560+ .quad 0,0 /* LDT */
17561+ .quad 0,0,0 /* three TLS descriptors */
17562+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17563+ /* asm/segment.h:GDT_ENTRIES must match this */
17564+
17565+ /* zero the remaining page */
17566+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17567+ .endr
17568+
17569 .align 16
17570 .globl early_gdt_descr
17571 early_gdt_descr:
17572 .word GDT_ENTRIES*8-1
17573 early_gdt_descr_base:
17574- .quad INIT_PER_CPU_VAR(gdt_page)
17575+ .quad cpu_gdt_table
17576
17577 ENTRY(phys_base)
17578 /* This must match the first entry in level2_kernel_pgt */
17579 .quad 0x0000000000000000
17580
17581 #include "../../x86/xen/xen-head.S"
17582-
17583- .section .bss, "aw", @nobits
17584+
17585+ .section .rodata,"a",@progbits
17586 .align L1_CACHE_BYTES
17587 ENTRY(idt_table)
17588- .skip IDT_ENTRIES * 16
17589+ .fill 512,8,0
17590
17591 .align L1_CACHE_BYTES
17592 ENTRY(nmi_idt_table)
17593- .skip IDT_ENTRIES * 16
17594+ .fill 512,8,0
17595
ae4e228f 17596 __PAGE_ALIGNED_BSS
58c5fc13 17597 .align PAGE_SIZE
17598diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17599index 9c3bd4a..e1d9b35 100644
17600--- a/arch/x86/kernel/i386_ksyms_32.c
17601+++ b/arch/x86/kernel/i386_ksyms_32.c
17602@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17603 EXPORT_SYMBOL(cmpxchg8b_emu);
17604 #endif
17605
17606+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17607+
17608 /* Networking helper routines. */
17609 EXPORT_SYMBOL(csum_partial_copy_generic);
17610+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17611+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17612
17613 EXPORT_SYMBOL(__get_user_1);
17614 EXPORT_SYMBOL(__get_user_2);
ae4e228f 17615@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17616
17617 EXPORT_SYMBOL(csum_partial);
17618 EXPORT_SYMBOL(empty_zero_page);
17619+
17620+#ifdef CONFIG_PAX_KERNEXEC
17621+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17622+#endif
4c928ab7 17623diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
5e856224 17624index f239f30..aab2a58 100644
17625--- a/arch/x86/kernel/i387.c
17626+++ b/arch/x86/kernel/i387.c
5e856224 17627@@ -189,6 +189,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
17628
17629 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17630 unsigned int pos, unsigned int count,
17631+ void *kbuf, void __user *ubuf) __size_overflow(4);
17632+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17633+ unsigned int pos, unsigned int count,
17634 void *kbuf, void __user *ubuf)
17635 {
17636 int ret;
5e856224 17637@@ -208,6 +211,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17638
17639 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17640 unsigned int pos, unsigned int count,
17641+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17642+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17643+ unsigned int pos, unsigned int count,
17644 const void *kbuf, const void __user *ubuf)
17645 {
17646 int ret;
5e856224 17647@@ -241,6 +247,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17648
17649 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17650 unsigned int pos, unsigned int count,
17651+ void *kbuf, void __user *ubuf) __size_overflow(4);
17652+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17653+ unsigned int pos, unsigned int count,
17654 void *kbuf, void __user *ubuf)
17655 {
17656 int ret;
5e856224 17657@@ -270,6 +279,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17658
17659 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17660 unsigned int pos, unsigned int count,
17661+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17662+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17663+ unsigned int pos, unsigned int count,
17664 const void *kbuf, const void __user *ubuf)
17665 {
17666 int ret;
5e856224 17667@@ -440,6 +452,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
17668
17669 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17670 unsigned int pos, unsigned int count,
17671+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
17672+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17673+ unsigned int pos, unsigned int count,
17674 void *kbuf, void __user *ubuf)
17675 {
17676 struct user_i387_ia32_struct env;
5e856224 17677@@ -472,6 +487,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17678
17679 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17680 unsigned int pos, unsigned int count,
17681+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
17682+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17683+ unsigned int pos, unsigned int count,
17684 const void *kbuf, const void __user *ubuf)
17685 {
17686 struct user_i387_ia32_struct env;
5e856224 17687@@ -620,6 +638,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
17688 }
17689
17690 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17691+ unsigned int size) __size_overflow(2);
17692+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17693 unsigned int size)
17694 {
17695 struct task_struct *tsk = current;
17696diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17697index 6104852..6114160 100644
17698--- a/arch/x86/kernel/i8259.c
17699+++ b/arch/x86/kernel/i8259.c
17700@@ -210,7 +210,7 @@ spurious_8259A_irq:
17701 "spurious 8259A interrupt: IRQ%d.\n", irq);
17702 spurious_irq_mask |= irqmask;
17703 }
17704- atomic_inc(&irq_err_count);
17705+ atomic_inc_unchecked(&irq_err_count);
17706 /*
17707 * Theoretically we do not have to handle this IRQ,
17708 * but in Linux this does not cause problems and is
17709diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17710index 43e9ccf..44ccf6f 100644
17711--- a/arch/x86/kernel/init_task.c
17712+++ b/arch/x86/kernel/init_task.c
17713@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17714 * way process stacks are handled. This is done by having a special
17715 * "init_task" linker map entry..
17716 */
17717-union thread_union init_thread_union __init_task_data =
17718- { INIT_THREAD_INFO(init_task) };
17719+union thread_union init_thread_union __init_task_data;
17720
17721 /*
17722 * Initial task structure.
17723@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17724 * section. Since TSS's are completely CPU-local, we want them
17725 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17726 */
17727-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17728-
17729+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17730+EXPORT_SYMBOL(init_tss);
17731diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17732index 8c96897..be66bfa 100644
17733--- a/arch/x86/kernel/ioport.c
17734+++ b/arch/x86/kernel/ioport.c
17735@@ -6,6 +6,7 @@
17736 #include <linux/sched.h>
17737 #include <linux/kernel.h>
17738 #include <linux/capability.h>
17739+#include <linux/security.h>
17740 #include <linux/errno.h>
17741 #include <linux/types.h>
17742 #include <linux/ioport.h>
fe2de317 17743@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17744
17745 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17746 return -EINVAL;
17747+#ifdef CONFIG_GRKERNSEC_IO
df50ba0c 17748+ if (turn_on && grsec_disable_privio) {
17749+ gr_handle_ioperm();
17750+ return -EPERM;
17751+ }
17752+#endif
17753 if (turn_on && !capable(CAP_SYS_RAWIO))
17754 return -EPERM;
17755
fe2de317 17756@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17757 * because the ->io_bitmap_max value must match the bitmap
17758 * contents:
17759 */
17760- tss = &per_cpu(init_tss, get_cpu());
17761+ tss = init_tss + get_cpu();
17762
17763 if (turn_on)
17764 bitmap_clear(t->io_bitmap_ptr, from, num);
fe2de317 17765@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17766 return -EINVAL;
17767 /* Trying to gain more privileges? */
17768 if (level > old) {
17769+#ifdef CONFIG_GRKERNSEC_IO
17770+ if (grsec_disable_privio) {
17771+ gr_handle_iopl();
17772+ return -EPERM;
17773+ }
17774+#endif
17775 if (!capable(CAP_SYS_RAWIO))
17776 return -EPERM;
58c5fc13 17777 }
fe2de317 17778diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
5e856224 17779index 7943e0c..dd32c5c 100644
17780--- a/arch/x86/kernel/irq.c
17781+++ b/arch/x86/kernel/irq.c
4c928ab7 17782@@ -18,7 +18,7 @@
17783 #include <asm/mce.h>
17784 #include <asm/hw_irq.h>
17785
17786-atomic_t irq_err_count;
17787+atomic_unchecked_t irq_err_count;
17788
17789 /* Function pointer for generic interrupt vector handling */
17790 void (*x86_platform_ipi_callback)(void) = NULL;
5e856224 17791@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17792 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17793 seq_printf(p, " Machine check polls\n");
17794 #endif
17795- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17796+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17797 #if defined(CONFIG_X86_IO_APIC)
17798- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17799+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17800 #endif
17801 return 0;
17802 }
5e856224 17803@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17804
17805 u64 arch_irq_stat(void)
17806 {
17807- u64 sum = atomic_read(&irq_err_count);
17808+ u64 sum = atomic_read_unchecked(&irq_err_count);
17809
17810 #ifdef CONFIG_X86_IO_APIC
17811- sum += atomic_read(&irq_mis_count);
17812+ sum += atomic_read_unchecked(&irq_mis_count);
17813 #endif
17814 return sum;
17815 }
17816diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
5e856224 17817index 40fc861..9b8739b 100644
17818--- a/arch/x86/kernel/irq_32.c
17819+++ b/arch/x86/kernel/irq_32.c
5e856224 17820@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17821 __asm__ __volatile__("andl %%esp,%0" :
17822 "=r" (sp) : "0" (THREAD_SIZE - 1));
17823
17824- return sp < (sizeof(struct thread_info) + STACK_WARN);
17825+ return sp < STACK_WARN;
17826 }
17827
17828 static void print_stack_overflow(void)
5e856224 17829@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17830 * per-CPU IRQ handling contexts (thread information and stack)
17831 */
17832 union irq_ctx {
17833- struct thread_info tinfo;
17834- u32 stack[THREAD_SIZE/sizeof(u32)];
17835+ unsigned long previous_esp;
17836+ u32 stack[THREAD_SIZE/sizeof(u32)];
17837 } __attribute__((aligned(THREAD_SIZE)));
17838
17839 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
5e856224 17840@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17841 static inline int
17842 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17843 {
17844- union irq_ctx *curctx, *irqctx;
17845+ union irq_ctx *irqctx;
17846 u32 *isp, arg1, arg2;
17847
17848- curctx = (union irq_ctx *) current_thread_info();
17849 irqctx = __this_cpu_read(hardirq_ctx);
17850
17851 /*
5e856224 17852@@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17853 * handler) we can't do that and just have to keep using the
17854 * current stack (which is the irq stack already after all)
17855 */
17856- if (unlikely(curctx == irqctx))
17857+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17858 return 0;
17859
17860 /* build the stack frame on the IRQ stack */
17861- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17862- irqctx->tinfo.task = curctx->tinfo.task;
17863- irqctx->tinfo.previous_esp = current_stack_pointer;
58c5fc13 17864+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
71d190be 17865+ irqctx->previous_esp = current_stack_pointer;
bc901d79 17866
17867- /*
17868- * Copy the softirq bits in preempt_count so that the
17869- * softirq checks work in the hardirq context.
17870- */
17871- irqctx->tinfo.preempt_count =
17872- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17873- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
bc901d79 17874+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17875+ __set_fs(MAKE_MM_SEG(0));
bc901d79 17876+#endif
71d190be 17877
17878 if (unlikely(overflow))
17879 call_on_stack(print_stack_overflow, isp);
5e856224 17880@@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17881 : "0" (irq), "1" (desc), "2" (isp),
17882 "D" (desc->handle_irq)
17883 : "memory", "cc", "ecx");
17884+
17885+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17886+ __set_fs(current_thread_info()->addr_limit);
17887+#endif
17888+
17889 return 1;
17890 }
17891
5e856224 17892@@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17893 */
17894 void __cpuinit irq_ctx_init(int cpu)
17895 {
17896- union irq_ctx *irqctx;
17897-
17898 if (per_cpu(hardirq_ctx, cpu))
17899 return;
17900
17901- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17902- THREAD_FLAGS,
17903- THREAD_ORDER));
17904- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17905- irqctx->tinfo.cpu = cpu;
17906- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17907- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17908-
17909- per_cpu(hardirq_ctx, cpu) = irqctx;
17910-
17911- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17912- THREAD_FLAGS,
17913- THREAD_ORDER));
17914- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17915- irqctx->tinfo.cpu = cpu;
17916- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17917-
17918- per_cpu(softirq_ctx, cpu) = irqctx;
17919+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17920+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17921
17922 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17923 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
5e856224 17924@@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17925 asmlinkage void do_softirq(void)
17926 {
17927 unsigned long flags;
17928- struct thread_info *curctx;
17929 union irq_ctx *irqctx;
17930 u32 *isp;
17931
5e856224 17932@@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17933 local_irq_save(flags);
17934
17935 if (local_softirq_pending()) {
17936- curctx = current_thread_info();
17937 irqctx = __this_cpu_read(softirq_ctx);
17938- irqctx->tinfo.task = curctx->task;
17939- irqctx->tinfo.previous_esp = current_stack_pointer;
17940+ irqctx->previous_esp = current_stack_pointer;
17941
17942 /* build the stack frame on the softirq stack */
17943- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17944+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17945+
17946+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17947+ __set_fs(MAKE_MM_SEG(0));
bc901d79 17948+#endif
17949
17950 call_on_stack(__do_softirq, isp);
17951+
17952+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17953+ __set_fs(current_thread_info()->addr_limit);
17954+#endif
17955+
58c5fc13 17956 /*
66a7e928 17957 * Shouldn't happen, we returned above if in_interrupt():
bc901d79 17958 */
4c928ab7 17959diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
5e856224 17960index d04d3ec..ea4b374 100644
17961--- a/arch/x86/kernel/irq_64.c
17962+++ b/arch/x86/kernel/irq_64.c
17963@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17964 u64 estack_top, estack_bottom;
17965 u64 curbase = (u64)task_stack_page(current);
17966
17967- if (user_mode_vm(regs))
17968+ if (user_mode(regs))
17969 return;
17970
17971 if (regs->sp >= curbase + sizeof(struct thread_info) +
17972diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17973index 90fcf62..e682cdd 100644
17974--- a/arch/x86/kernel/kdebugfs.c
17975+++ b/arch/x86/kernel/kdebugfs.c
17976@@ -28,6 +28,8 @@ struct setup_data_node {
17977 };
17978
17979 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17980+ size_t count, loff_t *ppos) __size_overflow(3);
17981+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17982 size_t count, loff_t *ppos)
17983 {
17984 struct setup_data_node *node = file->private_data;
fe2de317 17985diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
5e856224 17986index 2f45c4c..d95504f 100644
17987--- a/arch/x86/kernel/kgdb.c
17988+++ b/arch/x86/kernel/kgdb.c
5e856224 17989@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
6892158b 17990 #ifdef CONFIG_X86_32
66a7e928 17991 switch (regno) {
17992 case GDB_SS:
17993- if (!user_mode_vm(regs))
17994+ if (!user_mode(regs))
17995 *(unsigned long *)mem = __KERNEL_DS;
17996 break;
17997 case GDB_SP:
17998- if (!user_mode_vm(regs))
17999+ if (!user_mode(regs))
18000 *(unsigned long *)mem = kernel_stack_pointer(regs);
18001 break;
18002 case GDB_GS:
5e856224 18003@@ -475,12 +475,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18004 case 'k':
18005 /* clear the trace bit */
18006 linux_regs->flags &= ~X86_EFLAGS_TF;
18007- atomic_set(&kgdb_cpu_doing_single_step, -1);
18008+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18009
18010 /* set the trace bit if we're stepping */
18011 if (remcomInBuffer[0] == 's') {
18012 linux_regs->flags |= X86_EFLAGS_TF;
18013- atomic_set(&kgdb_cpu_doing_single_step,
18014+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18015 raw_smp_processor_id());
18016 }
18017
5e856224 18018@@ -545,7 +545,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
8308f9c9 18019
4c928ab7 18020 switch (cmd) {
18021 case DIE_DEBUG:
18022- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
18023+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
18024 if (user_mode(regs))
18025 return single_step_cont(regs, args);
18026 break;
fe2de317 18027diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
4c928ab7 18028index 7da647d..56fe348 100644
18029--- a/arch/x86/kernel/kprobes.c
18030+++ b/arch/x86/kernel/kprobes.c
4c928ab7 18031@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
18032 } __attribute__((packed)) *insn;
18033
bc901d79 18034 insn = (struct __arch_relative_insn *)from;
58c5fc13 18035+
ae4e228f 18036+ pax_open_kernel();
18037 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
18038 insn->op = op;
ae4e228f 18039+ pax_close_kernel();
18040 }
18041
df50ba0c 18042 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
4c928ab7 18043@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18044 kprobe_opcode_t opcode;
18045 kprobe_opcode_t *orig_opcodes = opcodes;
18046
18047- if (search_exception_tables((unsigned long)opcodes))
18048+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18049 return 0; /* Page fault may occur on this address. */
18050
18051 retry:
4c928ab7 18052@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
18053 }
18054 }
18055 insn_get_length(&insn);
ae4e228f 18056+ pax_open_kernel();
df50ba0c 18057 memcpy(dest, insn.kaddr, insn.length);
ae4e228f 18058+ pax_close_kernel();
58c5fc13 18059
18060 #ifdef CONFIG_X86_64
18061 if (insn_rip_relative(&insn)) {
4c928ab7 18062@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
18063 (u8 *) dest;
18064 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
18065 disp = (u8 *) dest + insn_offset_displacement(&insn);
18066+ pax_open_kernel();
18067 *(s32 *) disp = (s32) newdisp;
18068+ pax_close_kernel();
18069 }
18070 #endif
18071 return insn.length;
4c928ab7 18072@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
18073 */
18074 __copy_instruction(p->ainsn.insn, p->addr, 0);
18075
18076- if (can_boost(p->addr))
18077+ if (can_boost(ktla_ktva(p->addr)))
18078 p->ainsn.boostable = 0;
18079 else
18080 p->ainsn.boostable = -1;
18081
18082- p->opcode = *p->addr;
18083+ p->opcode = *(ktla_ktva(p->addr));
18084 }
18085
18086 int __kprobes arch_prepare_kprobe(struct kprobe *p)
4c928ab7 18087@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18088 * nor set current_kprobe, because it doesn't use single
18089 * stepping.
18090 */
18091- regs->ip = (unsigned long)p->ainsn.insn;
18092+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18093 preempt_enable_no_resched();
18094 return;
18095 }
4c928ab7 18096@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18097 if (p->opcode == BREAKPOINT_INSTRUCTION)
18098 regs->ip = (unsigned long)p->addr;
18099 else
18100- regs->ip = (unsigned long)p->ainsn.insn;
18101+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18102 }
18103
df50ba0c 18104 /*
4c928ab7 18105@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18106 setup_singlestep(p, regs, kcb, 0);
18107 return 1;
18108 }
18109- } else if (*addr != BREAKPOINT_INSTRUCTION) {
18110+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18111 /*
18112 * The breakpoint instruction was removed right
18113 * after we hit it. Another cpu has removed
4c928ab7 18114@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18115 " movq %rax, 152(%rsp)\n"
18116 RESTORE_REGS_STRING
18117 " popfq\n"
4c928ab7 18118+#ifdef KERNEXEC_PLUGIN
18119+ " btsq $63,(%rsp)\n"
18120+#endif
18121 #else
18122 " pushf\n"
18123 SAVE_REGS_STRING
4c928ab7 18124@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18125 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18126 {
18127 unsigned long *tos = stack_addr(regs);
18128- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18129+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18130 unsigned long orig_ip = (unsigned long)p->addr;
18131 kprobe_opcode_t *insn = p->ainsn.insn;
18132
4c928ab7 18133@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18134 struct die_args *args = data;
18135 int ret = NOTIFY_DONE;
18136
18137- if (args->regs && user_mode_vm(args->regs))
18138+ if (args->regs && user_mode(args->regs))
18139 return ret;
18140
18141 switch (val) {
4c928ab7 18142@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18143 * Verify if the address gap is in 2GB range, because this uses
18144 * a relative jump.
18145 */
18146- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
18147+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
18148 if (abs(rel) > 0x7fffffff)
18149 return -ERANGE;
18150
4c928ab7 18151@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18152 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
18153
18154 /* Set probe function call */
18155- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
18156+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
18157
18158 /* Set returning jmp instruction at the tail of out-of-line buffer */
18159 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
18160- (u8 *)op->kp.addr + op->optinsn.size);
18161+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
18162
18163 flush_icache_range((unsigned long) buf,
18164 (unsigned long) buf + TMPL_END_IDX +
4c928ab7 18165@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
18166 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
18167
18168 /* Backup instructions which will be replaced by jump address */
18169- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
18170+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
18171 RELATIVE_ADDR_SIZE);
18172
16454cff 18173 insn_buf[0] = RELATIVEJUMP_OPCODE;
fe2de317 18174diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
5e856224 18175index ea69726..a305f16 100644
18176--- a/arch/x86/kernel/ldt.c
18177+++ b/arch/x86/kernel/ldt.c
18178@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18179 if (reload) {
18180 #ifdef CONFIG_SMP
18181 preempt_disable();
18182- load_LDT(pc);
18183+ load_LDT_nolock(pc);
18184 if (!cpumask_equal(mm_cpumask(current->mm),
18185 cpumask_of(smp_processor_id())))
18186 smp_call_function(flush_ldt, current->mm, 1);
18187 preempt_enable();
18188 #else
18189- load_LDT(pc);
18190+ load_LDT_nolock(pc);
18191 #endif
18192 }
18193 if (oldsize) {
fe2de317 18194@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18195 return err;
18196
18197 for (i = 0; i < old->size; i++)
18198- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18199+ write_ldt_entry(new->ldt, i, old->ldt + i);
18200 return 0;
18201 }
18202
fe2de317 18203@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18204 retval = copy_ldt(&mm->context, &old_mm->context);
18205 mutex_unlock(&old_mm->context.lock);
18206 }
18207+
18208+ if (tsk == current) {
6892158b 18209+ mm->context.vdso = 0;
18210+
18211+#ifdef CONFIG_X86_32
18212+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18213+ mm->context.user_cs_base = 0UL;
18214+ mm->context.user_cs_limit = ~0UL;
18215+
18216+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18217+ cpus_clear(mm->context.cpu_user_cs_mask);
18218+#endif
18219+
18220+#endif
18221+#endif
18222+
18223+ }
18224+
18225 return retval;
18226 }
18227
18228@@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm)
18229 }
18230 }
18231
18232+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
18233 static int read_ldt(void __user *ptr, unsigned long bytecount)
18234 {
18235 int err;
18236@@ -175,6 +194,7 @@ error_return:
18237 return err;
18238 }
18239
18240+static int read_default_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
18241 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
18242 {
18243 /* CHECKME: Can we use _one_ random number ? */
18244@@ -230,6 +250,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18245 }
18246 }
18247
18248+#ifdef CONFIG_PAX_SEGMEXEC
18249+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18250+ error = -EINVAL;
18251+ goto out_unlock;
18252+ }
18253+#endif
18254+
18255 fill_ldt(&ldt, &ldt_info);
18256 if (oldmode)
18257 ldt.avl = 0;
18258diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18259index a3fa43b..8966f4c 100644
18260--- a/arch/x86/kernel/machine_kexec_32.c
18261+++ b/arch/x86/kernel/machine_kexec_32.c
ae4e228f 18262@@ -27,7 +27,7 @@
58c5fc13 18263 #include <asm/cacheflush.h>
ae4e228f 18264 #include <asm/debugreg.h>
18265
18266-static void set_idt(void *newidt, __u16 limit)
18267+static void set_idt(struct desc_struct *newidt, __u16 limit)
18268 {
18269 struct desc_ptr curidt;
18270
fe2de317 18271@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
18272 }
18273
18274
18275-static void set_gdt(void *newgdt, __u16 limit)
18276+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18277 {
18278 struct desc_ptr curgdt;
18279
18280@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18281 }
18282
18283 control_page = page_address(image->control_code_page);
18284- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18285+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18286
18287 relocate_kernel_ptr = control_page;
18288 page_list[PA_CONTROL_PAGE] = __pa(control_page);
fe2de317 18289diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
4c928ab7 18290index 3ca42d0..79d24cd 100644
18291--- a/arch/x86/kernel/microcode_intel.c
18292+++ b/arch/x86/kernel/microcode_intel.c
18293@@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18294 return ret;
18295 }
ae4e228f 18296
4c928ab7 18297+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
18298 static int get_ucode_user(void *to, const void *from, size_t n)
18299 {
18300- return copy_from_user(to, from, n);
6e9df6a3 18301+ return copy_from_user(to, (const void __force_user *)from, n);
18302 }
18303
18304 static enum ucode_state
18305 request_microcode_user(int cpu, const void __user *buf, size_t size)
18306 {
18307- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
6e9df6a3 18308+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18309 }
18310
18311 static void microcode_fini_cpu(int cpu)
fe2de317 18312diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
5e856224 18313index 925179f..1f0d561 100644
18314--- a/arch/x86/kernel/module.c
18315+++ b/arch/x86/kernel/module.c
5e856224 18316@@ -36,15 +36,61 @@
18317 #define DEBUGP(fmt...)
18318 #endif
18319
18320-void *module_alloc(unsigned long size)
5e856224 18321+static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
16454cff 18322+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
58c5fc13 18323 {
18324- if (PAGE_ALIGN(size) > MODULES_LEN)
18325+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
58c5fc13 18326 return NULL;
18327 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18328- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18329+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18330 -1, __builtin_return_address(0));
18331 }
58c5fc13 18332
18333+void *module_alloc(unsigned long size)
18334+{
18335+
18336+#ifdef CONFIG_PAX_KERNEXEC
58c5fc13 18337+ return __module_alloc(size, PAGE_KERNEL);
18338+#else
18339+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18340+#endif
18341+
18342+}
18343+
18344+#ifdef CONFIG_PAX_KERNEXEC
18345+#ifdef CONFIG_X86_32
18346+void *module_alloc_exec(unsigned long size)
18347+{
18348+ struct vm_struct *area;
18349+
18350+ if (size == 0)
18351+ return NULL;
18352+
18353+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
ae4e228f 18354+ return area ? area->addr : NULL;
18355+}
18356+EXPORT_SYMBOL(module_alloc_exec);
18357+
18358+void module_free_exec(struct module *mod, void *module_region)
18359+{
ae4e228f 18360+ vunmap(module_region);
18361+}
18362+EXPORT_SYMBOL(module_free_exec);
18363+#else
18364+void module_free_exec(struct module *mod, void *module_region)
18365+{
18366+ module_free(mod, module_region);
18367+}
18368+EXPORT_SYMBOL(module_free_exec);
18369+
18370+void *module_alloc_exec(unsigned long size)
18371+{
18372+ return __module_alloc(size, PAGE_KERNEL_RX);
18373+}
18374+EXPORT_SYMBOL(module_alloc_exec);
18375+#endif
58c5fc13 18376+#endif
ae4e228f 18377+
18378 #ifdef CONFIG_X86_32
18379 int apply_relocate(Elf32_Shdr *sechdrs,
18380 const char *strtab,
5e856224 18381@@ -55,14 +101,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18382 unsigned int i;
18383 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18384 Elf32_Sym *sym;
18385- uint32_t *location;
18386+ uint32_t *plocation, location;
18387
18388 DEBUGP("Applying relocate section %u to %u\n", relsec,
18389 sechdrs[relsec].sh_info);
18390 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18391 /* This is where to make the change */
18392- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18393- + rel[i].r_offset;
18394+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18395+ location = (uint32_t)plocation;
18396+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18397+ plocation = ktla_ktva((void *)plocation);
18398 /* This is the symbol it is referring to. Note that all
18399 undefined symbols have been resolved. */
18400 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
5e856224 18401@@ -71,11 +119,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18402 switch (ELF32_R_TYPE(rel[i].r_info)) {
18403 case R_386_32:
18404 /* We add the value into the location given */
18405- *location += sym->st_value;
ae4e228f 18406+ pax_open_kernel();
58c5fc13 18407+ *plocation += sym->st_value;
ae4e228f 18408+ pax_close_kernel();
18409 break;
18410 case R_386_PC32:
18411 /* Add the value, subtract its postition */
18412- *location += sym->st_value - (uint32_t)location;
ae4e228f 18413+ pax_open_kernel();
58c5fc13 18414+ *plocation += sym->st_value - location;
ae4e228f 18415+ pax_close_kernel();
18416 break;
18417 default:
18418 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
5e856224 18419@@ -120,21 +172,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18420 case R_X86_64_NONE:
18421 break;
18422 case R_X86_64_64:
ae4e228f 18423+ pax_open_kernel();
58c5fc13 18424 *(u64 *)loc = val;
ae4e228f 18425+ pax_close_kernel();
18426 break;
18427 case R_X86_64_32:
ae4e228f 18428+ pax_open_kernel();
58c5fc13 18429 *(u32 *)loc = val;
ae4e228f 18430+ pax_close_kernel();
18431 if (val != *(u32 *)loc)
18432 goto overflow;
18433 break;
18434 case R_X86_64_32S:
ae4e228f 18435+ pax_open_kernel();
58c5fc13 18436 *(s32 *)loc = val;
ae4e228f 18437+ pax_close_kernel();
18438 if ((s64)val != *(s32 *)loc)
18439 goto overflow;
18440 break;
18441 case R_X86_64_PC32:
18442 val -= (u64)loc;
ae4e228f 18443+ pax_open_kernel();
58c5fc13 18444 *(u32 *)loc = val;
ae4e228f 18445+ pax_close_kernel();
18446+
18447 #if 0
18448 if ((s64)val != *(s32 *)loc)
18449 goto overflow;
4c928ab7 18450diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
5e856224 18451index 47acaf3..ec48ab6 100644
18452--- a/arch/x86/kernel/nmi.c
18453+++ b/arch/x86/kernel/nmi.c
5e856224 18454@@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
18455 dotraplinkage notrace __kprobes void
18456 do_nmi(struct pt_regs *regs, long error_code)
18457 {
18458+
18459+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18460+ if (!user_mode(regs)) {
18461+ unsigned long cs = regs->cs & 0xFFFF;
18462+ unsigned long ip = ktva_ktla(regs->ip);
18463+
18464+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18465+ regs->ip = ip;
18466+ }
18467+#endif
18468+
5e856224 18469 nmi_nesting_preprocess(regs);
4c928ab7 18470
5e856224 18471 nmi_enter();
18472diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18473index 676b8c7..870ba04 100644
18474--- a/arch/x86/kernel/paravirt-spinlocks.c
18475+++ b/arch/x86/kernel/paravirt-spinlocks.c
18476@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18477 arch_spin_lock(lock);
18478 }
18479
18480-struct pv_lock_ops pv_lock_ops = {
18481+struct pv_lock_ops pv_lock_ops __read_only = {
18482 #ifdef CONFIG_SMP
18483 .spin_is_locked = __ticket_spin_is_locked,
18484 .spin_is_contended = __ticket_spin_is_contended,
18485diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
4c928ab7 18486index d90272e..6bb013b 100644
18487--- a/arch/x86/kernel/paravirt.c
18488+++ b/arch/x86/kernel/paravirt.c
18489@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18490 {
18491 return x;
18492 }
18493+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18494+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18495+#endif
18496
18497 void __init default_banner(void)
18498 {
4c928ab7 18499@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
58c5fc13 18500 if (opfunc == NULL)
18501 /* If there's no function, patch it with a ud2a (BUG) */
18502 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18503- else if (opfunc == _paravirt_nop)
18504+ else if (opfunc == (void *)_paravirt_nop)
18505 /* If the operation is a nop, then nop the callsite */
18506 ret = paravirt_patch_nop();
18507
18508 /* identity functions just return their single argument */
18509- else if (opfunc == _paravirt_ident_32)
18510+ else if (opfunc == (void *)_paravirt_ident_32)
18511 ret = paravirt_patch_ident_32(insnbuf, len);
18512- else if (opfunc == _paravirt_ident_64)
18513+ else if (opfunc == (void *)_paravirt_ident_64)
18514 ret = paravirt_patch_ident_64(insnbuf, len);
18515+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18516+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18517+ ret = paravirt_patch_ident_64(insnbuf, len);
18518+#endif
18519
18520 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15a11c5b 18521 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
4c928ab7 18522@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18523 if (insn_len > len || start == NULL)
18524 insn_len = len;
18525 else
18526- memcpy(insnbuf, start, insn_len);
18527+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18528
18529 return insn_len;
18530 }
4c928ab7 18531@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
18532 preempt_enable();
18533 }
18534
18535-struct pv_info pv_info = {
18536+struct pv_info pv_info __read_only = {
18537 .name = "bare hardware",
18538 .paravirt_enabled = 0,
18539 .kernel_rpl = 0,
4c928ab7 18540@@ -313,16 +320,16 @@ struct pv_info pv_info = {
6e9df6a3 18541 #endif
18542 };
18543
18544-struct pv_init_ops pv_init_ops = {
18545+struct pv_init_ops pv_init_ops __read_only = {
18546 .patch = native_patch,
18547 };
18548
18549-struct pv_time_ops pv_time_ops = {
18550+struct pv_time_ops pv_time_ops __read_only = {
ae4e228f 18551 .sched_clock = native_sched_clock,
6e9df6a3 18552 .steal_clock = native_steal_clock,
18553 };
18554
18555-struct pv_irq_ops pv_irq_ops = {
18556+struct pv_irq_ops pv_irq_ops __read_only = {
18557 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18558 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
ae4e228f 18559 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
4c928ab7 18560@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
18561 #endif
18562 };
18563
18564-struct pv_cpu_ops pv_cpu_ops = {
18565+struct pv_cpu_ops pv_cpu_ops __read_only = {
18566 .cpuid = native_cpuid,
18567 .get_debugreg = native_get_debugreg,
18568 .set_debugreg = native_set_debugreg,
4c928ab7 18569@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18570 .end_context_switch = paravirt_nop,
18571 };
18572
18573-struct pv_apic_ops pv_apic_ops = {
18574+struct pv_apic_ops pv_apic_ops __read_only = {
18575 #ifdef CONFIG_X86_LOCAL_APIC
18576 .startup_ipi_hook = paravirt_nop,
18577 #endif
18578 };
18579
18580-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18581+#ifdef CONFIG_X86_32
18582+#ifdef CONFIG_X86_PAE
18583+/* 64-bit pagetable entries */
18584+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18585+#else
18586 /* 32-bit pagetable entries */
18587 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18588+#endif
18589 #else
18590 /* 64-bit pagetable entries */
18591 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18592 #endif
18593
18594-struct pv_mmu_ops pv_mmu_ops = {
18595+struct pv_mmu_ops pv_mmu_ops __read_only = {
18596
18597 .read_cr2 = native_read_cr2,
18598 .write_cr2 = native_write_cr2,
4c928ab7 18599@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18600 .make_pud = PTE_IDENT,
18601
18602 .set_pgd = native_set_pgd,
18603+ .set_pgd_batched = native_set_pgd_batched,
18604 #endif
18605 #endif /* PAGETABLE_LEVELS >= 3 */
18606
4c928ab7 18607@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18608 },
18609
18610 .set_fixmap = native_set_fixmap,
18611+
18612+#ifdef CONFIG_PAX_KERNEXEC
18613+ .pax_open_kernel = native_pax_open_kernel,
18614+ .pax_close_kernel = native_pax_close_kernel,
18615+#endif
18616+
18617 };
18618
18619 EXPORT_SYMBOL_GPL(pv_time_ops);
fe2de317 18620diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
4c928ab7 18621index 35ccf75..7a15747 100644
18622--- a/arch/x86/kernel/pci-iommu_table.c
18623+++ b/arch/x86/kernel/pci-iommu_table.c
18624@@ -2,7 +2,7 @@
18625 #include <asm/iommu_table.h>
18626 #include <linux/string.h>
18627 #include <linux/kallsyms.h>
18628-
18629+#include <linux/sched.h>
18630
18631 #define DEBUG 1
18632
fe2de317 18633diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
5e856224 18634index 15763af..da59ada 100644
18635--- a/arch/x86/kernel/process.c
18636+++ b/arch/x86/kernel/process.c
18637@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18638
18639 void free_thread_info(struct thread_info *ti)
18640 {
18641- free_thread_xstate(ti->task);
4c928ab7 18642 free_pages((unsigned long)ti, THREAD_ORDER);
18643 }
18644
18645+static struct kmem_cache *task_struct_cachep;
18646+
18647 void arch_task_cache_init(void)
18648 {
18649- task_xstate_cachep =
18650- kmem_cache_create("task_xstate", xstate_size,
18651+ /* create a slab on which task_structs can be allocated */
18652+ task_struct_cachep =
18653+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18654+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18655+
18656+ task_xstate_cachep =
18657+ kmem_cache_create("task_xstate", xstate_size,
18658 __alignof__(union thread_xstate),
18659- SLAB_PANIC | SLAB_NOTRACK, NULL);
18660+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18661+}
18662+
18663+struct task_struct *alloc_task_struct_node(int node)
18664+{
18665+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18666+}
18667+
18668+void free_task_struct(struct task_struct *task)
18669+{
18670+ free_thread_xstate(task);
18671+ kmem_cache_free(task_struct_cachep, task);
18672 }
18673
18674 /*
18675@@ -70,7 +87,7 @@ void exit_thread(void)
18676 unsigned long *bp = t->io_bitmap_ptr;
18677
18678 if (bp) {
18679- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18680+ struct tss_struct *tss = init_tss + get_cpu();
18681
18682 t->io_bitmap_ptr = NULL;
18683 clear_thread_flag(TIF_IO_BITMAP);
18684@@ -106,7 +123,7 @@ void show_regs_common(void)
18685
18686 printk(KERN_CONT "\n");
18687 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18688- current->pid, current->comm, print_tainted(),
18689+ task_pid_nr(current), current->comm, print_tainted(),
18690 init_utsname()->release,
18691 (int)strcspn(init_utsname()->version, " "),
18692 init_utsname()->version);
18693@@ -120,6 +137,9 @@ void flush_thread(void)
18694 {
18695 struct task_struct *tsk = current;
18696
18697+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18698+ loadsegment(gs, 0);
18699+#endif
18700 flush_ptrace_hw_breakpoint(tsk);
18701 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18702 /*
18703@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18704 regs.di = (unsigned long) arg;
18705
18706 #ifdef CONFIG_X86_32
18707- regs.ds = __USER_DS;
18708- regs.es = __USER_DS;
18709+ regs.ds = __KERNEL_DS;
18710+ regs.es = __KERNEL_DS;
18711 regs.fs = __KERNEL_PERCPU;
18712- regs.gs = __KERNEL_STACK_CANARY;
18713+ savesegment(gs, regs.gs);
18714 #else
18715 regs.ss = __KERNEL_DS;
18716 #endif
4c928ab7 18717@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
fe2de317 18718
18719 return ret;
18720 }
18721-void stop_this_cpu(void *dummy)
18722+__noreturn void stop_this_cpu(void *dummy)
18723 {
18724 local_irq_disable();
18725 /*
4c928ab7 18726@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18727 }
18728 early_param("idle", idle_setup);
18729
18730-unsigned long arch_align_stack(unsigned long sp)
18731+#ifdef CONFIG_PAX_RANDKSTACK
18732+void pax_randomize_kstack(struct pt_regs *regs)
18733 {
18734- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18735- sp -= get_random_int() % 8192;
18736- return sp & ~0xf;
18737-}
18738+ struct thread_struct *thread = &current->thread;
18739+ unsigned long time;
18740
18741-unsigned long arch_randomize_brk(struct mm_struct *mm)
18742-{
18743- unsigned long range_end = mm->brk + 0x02000000;
18744- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18745-}
18746+ if (!randomize_va_space)
18747+ return;
18748+
18749+ if (v8086_mode(regs))
18750+ return;
18751
18752+ rdtscl(time);
18753+
18754+ /* P4 seems to return a 0 LSB, ignore it */
18755+#ifdef CONFIG_MPENTIUM4
18756+ time &= 0x3EUL;
18757+ time <<= 2;
18758+#elif defined(CONFIG_X86_64)
18759+ time &= 0xFUL;
18760+ time <<= 4;
18761+#else
18762+ time &= 0x1FUL;
18763+ time <<= 3;
18764+#endif
18765+
18766+ thread->sp0 ^= time;
18767+ load_sp0(init_tss + smp_processor_id(), thread);
18768+
18769+#ifdef CONFIG_X86_64
18770+ percpu_write(kernel_stack, thread->sp0);
18771+#endif
18772+}
18773+#endif
18774diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
5e856224 18775index c08d1ff..6ae1c81 100644
18776--- a/arch/x86/kernel/process_32.c
18777+++ b/arch/x86/kernel/process_32.c
4c928ab7 18778@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18779 unsigned long thread_saved_pc(struct task_struct *tsk)
18780 {
18781 return ((unsigned long *)tsk->thread.sp)[3];
18782+//XXX return tsk->thread.eip;
18783 }
18784
18785 #ifndef CONFIG_SMP
5e856224 18786@@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18787 unsigned long sp;
18788 unsigned short ss, gs;
18789
18790- if (user_mode_vm(regs)) {
18791+ if (user_mode(regs)) {
18792 sp = regs->sp;
18793 ss = regs->ss & 0xffff;
18794- gs = get_user_gs(regs);
18795 } else {
18796 sp = kernel_stack_pointer(regs);
18797 savesegment(ss, ss);
18798- savesegment(gs, gs);
18799 }
18800+ gs = get_user_gs(regs);
18801
18802 show_regs_common();
18803
5e856224 18804@@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18805 struct task_struct *tsk;
18806 int err;
18807
18808- childregs = task_pt_regs(p);
18809+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18810 *childregs = *regs;
18811 childregs->ax = 0;
18812 childregs->sp = sp;
18813
18814 p->thread.sp = (unsigned long) childregs;
18815 p->thread.sp0 = (unsigned long) (childregs+1);
18816+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18817
18818 p->thread.ip = (unsigned long) ret_from_fork;
18819
5e856224 18820@@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18821 struct thread_struct *prev = &prev_p->thread,
18822 *next = &next_p->thread;
18823 int cpu = smp_processor_id();
18824- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18825+ struct tss_struct *tss = init_tss + cpu;
4c928ab7 18826 fpu_switch_t fpu;
18827
18828 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
5e856224 18829@@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18830 */
18831 lazy_save_gs(prev->gs);
18832
18833+#ifdef CONFIG_PAX_MEMORY_UDEREF
bc901d79 18834+ __set_fs(task_thread_info(next_p)->addr_limit);
18835+#endif
18836+
18837 /*
18838 * Load the per-thread Thread-Local Storage descriptor.
18839 */
5e856224 18840@@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18841 */
18842 arch_end_context_switch(next_p);
57199397 18843
18844+ percpu_write(current_task, next_p);
18845+ percpu_write(current_tinfo, &next_p->tinfo);
57199397 18846+
18847 /*
18848 * Restore %gs if needed (which is common)
18849 */
5e856224 18850@@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
71d190be 18851
4c928ab7 18852 switch_fpu_finish(next_p, fpu);
18853
18854- percpu_write(current_task, next_p);
18855-
18856 return prev_p;
18857 }
18858
5e856224 18859@@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18860 } while (count++ < 16);
18861 return 0;
18862 }
18863-
fe2de317 18864diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
5e856224 18865index cfa5c90..4facd28 100644
18866--- a/arch/x86/kernel/process_64.c
18867+++ b/arch/x86/kernel/process_64.c
4c928ab7 18868@@ -89,7 +89,7 @@ static void __exit_idle(void)
18869 void exit_idle(void)
18870 {
18871 /* idle loop has pid 0 */
18872- if (current->pid)
18873+ if (task_pid_nr(current))
18874 return;
18875 __exit_idle();
18876 }
5e856224 18877@@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
71d190be
MT
18878 struct pt_regs *childregs;
18879 struct task_struct *me = current;
18880
18881- childregs = ((struct pt_regs *)
18882- (THREAD_SIZE + task_stack_page(p))) - 1;
66a7e928 18883+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
71d190be
MT
18884 *childregs = *regs;
18885
18886 childregs->ax = 0;
5e856224 18887@@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
66a7e928
MT
18888 p->thread.sp = (unsigned long) childregs;
18889 p->thread.sp0 = (unsigned long) (childregs+1);
18890 p->thread.usersp = me->thread.usersp;
18891+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18892
18893 set_tsk_thread_flag(p, TIF_FORK);
18894
5e856224 18895@@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
57199397
MT
18896 struct thread_struct *prev = &prev_p->thread;
18897 struct thread_struct *next = &next_p->thread;
18898 int cpu = smp_processor_id();
18899- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18900+ struct tss_struct *tss = init_tss + cpu;
18901 unsigned fsindex, gsindex;
4c928ab7 18902 fpu_switch_t fpu;
57199397 18903
5e856224 18904@@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
71d190be
MT
18905 prev->usersp = percpu_read(old_rsp);
18906 percpu_write(old_rsp, next->usersp);
18907 percpu_write(current_task, next_p);
18908+ percpu_write(current_tinfo, &next_p->tinfo);
18909
18910- percpu_write(kernel_stack,
18911- (unsigned long)task_stack_page(next_p) +
18912- THREAD_SIZE - KERNEL_STACK_OFFSET);
18913+ percpu_write(kernel_stack, next->sp0);
18914
18915 /*
18916 * Now maybe reload the debug registers and handle I/O bitmaps
5e856224 18917@@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
57199397
MT
18918 if (!p || p == current || p->state == TASK_RUNNING)
18919 return 0;
18920 stack = (unsigned long)task_stack_page(p);
18921- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
66a7e928 18922+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
57199397
MT
18923 return 0;
18924 fp = *(u64 *)(p->thread.sp);
18925 do {
18926- if (fp < (unsigned long)stack ||
18927- fp >= (unsigned long)stack+THREAD_SIZE)
66a7e928 18928+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
57199397
MT
18929 return 0;
18930 ip = *(u64 *)(fp+8);
18931 if (!in_sched_functions(ip))
fe2de317 18932diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
5e856224 18933index 5026738..574f70a 100644
fe2de317
MT
18934--- a/arch/x86/kernel/ptrace.c
18935+++ b/arch/x86/kernel/ptrace.c
5e856224 18936@@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
4c928ab7
MT
18937 static int ioperm_get(struct task_struct *target,
18938 const struct user_regset *regset,
18939 unsigned int pos, unsigned int count,
18940+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18941+static int ioperm_get(struct task_struct *target,
18942+ const struct user_regset *regset,
18943+ unsigned int pos, unsigned int count,
18944 void *kbuf, void __user *ubuf)
18945 {
18946 if (!target->thread.io_bitmap_ptr)
5e856224 18947@@ -823,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
bc901d79 18948 unsigned long addr, unsigned long data)
ae4e228f
MT
18949 {
18950 int ret;
18951- unsigned long __user *datap = (unsigned long __user *)data;
18952+ unsigned long __user *datap = (__force unsigned long __user *)data;
18953
18954 switch (request) {
18955 /* read the word at location addr in the USER area. */
5e856224 18956@@ -908,14 +912,14 @@ long arch_ptrace(struct task_struct *child, long request,
bc901d79 18957 if ((int) addr < 0)
ae4e228f
MT
18958 return -EIO;
18959 ret = do_get_thread_area(child, addr,
bc901d79
MT
18960- (struct user_desc __user *)data);
18961+ (__force struct user_desc __user *) data);
ae4e228f
MT
18962 break;
18963
18964 case PTRACE_SET_THREAD_AREA:
bc901d79 18965 if ((int) addr < 0)
ae4e228f
MT
18966 return -EIO;
18967 ret = do_set_thread_area(child, addr,
bc901d79
MT
18968- (struct user_desc __user *)data, 0);
18969+ (__force struct user_desc __user *) data, 0);
ae4e228f
MT
18970 break;
18971 #endif
18972
5e856224 18973@@ -1332,7 +1336,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
ae4e228f
MT
18974 memset(info, 0, sizeof(*info));
18975 info->si_signo = SIGTRAP;
18976 info->si_code = si_code;
18977- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18978+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18979 }
18980
18981 void user_single_step_siginfo(struct task_struct *tsk,
5e856224
MT
18982@@ -1361,6 +1365,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18983 # define IS_IA32 0
18984 #endif
18985
18986+#ifdef CONFIG_GRKERNSEC_SETXID
18987+extern void gr_delayed_cred_worker(void);
18988+#endif
18989+
18990 /*
18991 * We must return the syscall number to actually look up in the table.
18992 * This can be -1L to skip running any syscall at all.
18993@@ -1369,6 +1377,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18994 {
18995 long ret = 0;
18996
18997+#ifdef CONFIG_GRKERNSEC_SETXID
18998+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18999+ gr_delayed_cred_worker();
19000+#endif
19001+
19002 /*
19003 * If we stepped into a sysenter/syscall insn, it trapped in
19004 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
19005@@ -1412,6 +1425,11 @@ void syscall_trace_leave(struct pt_regs *regs)
19006 {
19007 bool step;
19008
19009+#ifdef CONFIG_GRKERNSEC_SETXID
19010+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
19011+ gr_delayed_cred_worker();
19012+#endif
19013+
19014 audit_syscall_exit(regs);
19015
19016 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
fe2de317
MT
19017diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
19018index 42eb330..139955c 100644
19019--- a/arch/x86/kernel/pvclock.c
19020+++ b/arch/x86/kernel/pvclock.c
19021@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
66a7e928
MT
19022 return pv_tsc_khz;
19023 }
19024
19025-static atomic64_t last_value = ATOMIC64_INIT(0);
19026+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
19027
19028 void pvclock_resume(void)
19029 {
19030- atomic64_set(&last_value, 0);
19031+ atomic64_set_unchecked(&last_value, 0);
19032 }
19033
19034 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
fe2de317 19035@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
66a7e928
MT
19036 * updating at the same time, and one of them could be slightly behind,
19037 * making the assumption that last_value always go forward fail to hold.
19038 */
19039- last = atomic64_read(&last_value);
19040+ last = atomic64_read_unchecked(&last_value);
19041 do {
19042 if (ret < last)
19043 return last;
19044- last = atomic64_cmpxchg(&last_value, last, ret);
19045+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
19046 } while (unlikely(last != ret));
19047
19048 return ret;
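
The pvclock hunk above moves last_value over to the *_unchecked atomic helpers: under the REFCOUNT hardening added elsewhere in this patch, the plain atomic64 operations trap on overflow, and this counter is a timestamp used only to keep the returned clock monotonic across CPUs, not an object reference count, so it is deliberately exempted. The clamp loop is easy to miss in diff form; below is a minimal stand-alone model of it using C11 atomics rather than the kernel API (illustration only, none of these names are the kernel's).

/* Minimal model of pvclock's "never go backwards" clamp, using C11 atomics
 * instead of the kernel's atomic64_*_unchecked helpers (illustration only). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

static uint64_t clamp_monotonic(uint64_t ret)
{
        uint64_t last = atomic_load(&last_value);

        do {
                if (ret < last)
                        return last;    /* another CPU already published a newer time */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));

        return ret;                     /* we advanced last_value ourselves */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)clamp_monotonic(100)); /* 100 */
        printf("%llu\n", (unsigned long long)clamp_monotonic(90));  /* still 100 */
        return 0;
}
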
fe2de317 19049diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
5e856224 19050index d840e69..98e9581 100644
fe2de317
MT
19051--- a/arch/x86/kernel/reboot.c
19052+++ b/arch/x86/kernel/reboot.c
66a7e928 19053@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
58c5fc13
MT
19054 EXPORT_SYMBOL(pm_power_off);
19055
19056 static const struct desc_ptr no_idt = {};
19057-static int reboot_mode;
19058+static unsigned short reboot_mode;
15a11c5b 19059 enum reboot_type reboot_type = BOOT_ACPI;
58c5fc13
MT
19060 int reboot_force;
19061
5e856224 19062@@ -335,13 +335,17 @@ core_initcall(reboot_init);
66a7e928
MT
19063 extern const unsigned char machine_real_restart_asm[];
19064 extern const u64 machine_real_restart_gdt[3];
19065
19066-void machine_real_restart(unsigned int type)
19067+__noreturn void machine_real_restart(unsigned int type)
58c5fc13 19068 {
66a7e928
MT
19069 void *restart_va;
19070 unsigned long restart_pa;
19071- void (*restart_lowmem)(unsigned int);
19072+ void (* __noreturn restart_lowmem)(unsigned int);
19073 u64 *lowmem_gdt;
19074
19075+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19076+ struct desc_struct *gdt;
19077+#endif
19078+
58c5fc13
MT
19079 local_irq_disable();
19080
66a7e928 19081 /* Write zero to CMOS register number 0x0f, which the BIOS POST
5e856224 19082@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
58c5fc13
MT
19083 boot)". This seems like a fairly standard thing that gets set by
19084 REBOOT.COM programs, and the previous reset routine did this
19085 too. */
19086- *((unsigned short *)0x472) = reboot_mode;
19087+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19088
66a7e928
MT
19089 /* Patch the GDT in the low memory trampoline */
19090 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
19091
19092 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
19093 restart_pa = virt_to_phys(restart_va);
19094- restart_lowmem = (void (*)(unsigned int))restart_pa;
19095+ restart_lowmem = (void *)restart_pa;
19096
19097 /* GDT[0]: GDT self-pointer */
19098 lowmem_gdt[0] =
5e856224 19099@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
66a7e928
MT
19100 GDT_ENTRY(0x009b, restart_pa, 0xffff);
19101
19102 /* Jump to the identity-mapped low memory code */
19103+
19104+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19105+ gdt = get_cpu_gdt_table(smp_processor_id());
19106+ pax_open_kernel();
19107+#ifdef CONFIG_PAX_MEMORY_UDEREF
19108+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
19109+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
19110+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
19111+#endif
19112+#ifdef CONFIG_PAX_KERNEXEC
19113+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
19114+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
19115+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
19116+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
19117+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
19118+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
19119+#endif
19120+ pax_close_kernel();
19121+#endif
19122+
19123+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19124+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
19125+ unreachable();
19126+#else
19127 restart_lowmem(type);
19128+#endif
19129+
19130 }
19131 #ifdef CONFIG_APM_MODULE
19132 EXPORT_SYMBOL(machine_real_restart);
5e856224 19133@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
15a11c5b
MT
19134 * try to force a triple fault and then cycle between hitting the keyboard
19135 * controller and doing that
19136 */
66a7e928
MT
19137-static void native_machine_emergency_restart(void)
19138+__noreturn static void native_machine_emergency_restart(void)
19139 {
19140 int i;
15a11c5b 19141 int attempt = 0;
5e856224 19142@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
66a7e928
MT
19143 #endif
19144 }
19145
19146-static void __machine_emergency_restart(int emergency)
19147+static __noreturn void __machine_emergency_restart(int emergency)
19148 {
19149 reboot_emergency = emergency;
19150 machine_ops.emergency_restart();
19151 }
19152
19153-static void native_machine_restart(char *__unused)
19154+static __noreturn void native_machine_restart(char *__unused)
19155 {
19156 printk("machine restart\n");
19157
5e856224 19158@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
66a7e928
MT
19159 __machine_emergency_restart(0);
19160 }
19161
19162-static void native_machine_halt(void)
19163+static __noreturn void native_machine_halt(void)
19164 {
19165 /* stop other cpus and apics */
19166 machine_shutdown();
5e856224 19167@@ -706,7 +736,7 @@ static void native_machine_halt(void)
66a7e928
MT
19168 stop_this_cpu(NULL);
19169 }
19170
19171-static void native_machine_power_off(void)
19172+__noreturn static void native_machine_power_off(void)
19173 {
19174 if (pm_power_off) {
19175 if (!reboot_force)
5e856224 19176@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
66a7e928
MT
19177 }
19178 /* a fallback in case there is no PM info available */
19179 tboot_shutdown(TB_SHUTDOWN_HALT);
19180+ unreachable();
19181 }
19182
19183 struct machine_ops machine_ops = {
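
The reboot and halt paths above gain __noreturn on the functions and on the restart_lowmem function pointer, and native_machine_power_off() gets an explicit unreachable() after the tboot fallback, so the compiler knows control never comes back from these calls. A small stand-alone sketch of the same annotation pattern; __noreturn is defined locally on the assumption that the kernel macro is the usual GCC noreturn attribute, and halt_machine() is a made-up example, not kernel code.

/* Sketch: annotating both a function and a pointer to it as noreturn. */
#define __noreturn __attribute__((noreturn))

static __noreturn void halt_machine(unsigned int type)
{
        (void)type;
        for (;;)
                ;               /* genuinely never returns */
}

int main(void)
{
        void (* __noreturn restart_lowmem)(unsigned int) = halt_machine;

        restart_lowmem(0x1234);
        /* not reached: the attribute lets the compiler drop the return path */
}
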
fe2de317
MT
19184diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19185index 7a6f3b3..bed145d7 100644
19186--- a/arch/x86/kernel/relocate_kernel_64.S
19187+++ b/arch/x86/kernel/relocate_kernel_64.S
19188@@ -11,6 +11,7 @@
19189 #include <asm/kexec.h>
19190 #include <asm/processor-flags.h>
19191 #include <asm/pgtable_types.h>
19192+#include <asm/alternative-asm.h>
19193
19194 /*
19195 * Must be relocatable PIC code callable as a C function
19196@@ -160,13 +161,14 @@ identity_mapped:
19197 xorq %rbp, %rbp
19198 xorq %r8, %r8
19199 xorq %r9, %r9
19200- xorq %r10, %r9
19201+ xorq %r10, %r10
19202 xorq %r11, %r11
19203 xorq %r12, %r12
19204 xorq %r13, %r13
19205 xorq %r14, %r14
19206 xorq %r15, %r15
19207
19208+ pax_force_retaddr 0, 1
19209 ret
19210
19211 1:
19212diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
5e856224 19213index d7d5099..28555d0 100644
fe2de317
MT
19214--- a/arch/x86/kernel/setup.c
19215+++ b/arch/x86/kernel/setup.c
5e856224 19216@@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
6e9df6a3
MT
19217
19218 switch (data->type) {
19219 case SETUP_E820_EXT:
19220- parse_e820_ext(data);
19221+ parse_e820_ext((struct setup_data __force_kernel *)data);
19222 break;
19223 case SETUP_DTB:
19224 add_dtb(pa_data);
5e856224 19225@@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
efbe55a5
MT
19226 * area (640->1Mb) as ram even though it is not.
19227 * take them out.
19228 */
19229- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19230+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19231 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19232 }
19233
5e856224 19234@@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
58c5fc13
MT
19235
19236 if (!boot_params.hdr.root_flags)
19237 root_mountflags &= ~MS_RDONLY;
19238- init_mm.start_code = (unsigned long) _text;
19239- init_mm.end_code = (unsigned long) _etext;
19240+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19241+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19242 init_mm.end_data = (unsigned long) _edata;
19243 init_mm.brk = _brk_end;
19244
19245- code_resource.start = virt_to_phys(_text);
19246- code_resource.end = virt_to_phys(_etext)-1;
19247- data_resource.start = virt_to_phys(_etext);
19248+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19249+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19250+ data_resource.start = virt_to_phys(_sdata);
19251 data_resource.end = virt_to_phys(_edata)-1;
19252 bss_resource.start = virt_to_phys(&__bss_start);
19253 bss_resource.end = virt_to_phys(&__bss_stop)-1;
fe2de317 19254diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
5e856224 19255index 5a98aa2..848d2be 100644
fe2de317
MT
19256--- a/arch/x86/kernel/setup_percpu.c
19257+++ b/arch/x86/kernel/setup_percpu.c
57199397
MT
19258@@ -21,19 +21,17 @@
19259 #include <asm/cpu.h>
19260 #include <asm/stackprotector.h>
58c5fc13 19261
6892158b 19262-DEFINE_PER_CPU(int, cpu_number);
58c5fc13 19263+#ifdef CONFIG_SMP
6892158b 19264+DEFINE_PER_CPU(unsigned int, cpu_number);
58c5fc13
MT
19265 EXPORT_PER_CPU_SYMBOL(cpu_number);
19266+#endif
19267
19268-#ifdef CONFIG_X86_64
19269 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19270-#else
19271-#define BOOT_PERCPU_OFFSET 0
19272-#endif
19273
19274 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19275 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19276
19277-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19278+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19279 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19280 };
19281 EXPORT_SYMBOL(__per_cpu_offset);
4c928ab7
MT
19282@@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void)
19283 * Pointer to the allocated area on success, NULL on failure.
19284 */
19285 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19286+ unsigned long align) __size_overflow(2);
19287+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19288 unsigned long align)
19289 {
19290 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
19291@@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19292 /*
19293 * Helpers for first chunk memory allocation
19294 */
19295+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
19296+
19297 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
19298 {
19299 return pcpu_alloc_bootmem(cpu, size, align);
19300@@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
58c5fc13
MT
19301 {
19302 #ifdef CONFIG_X86_32
ae4e228f
MT
19303 struct desc_struct gdt;
19304+ unsigned long base = per_cpu_offset(cpu);
19305
58c5fc13
MT
19306- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19307- 0x2 | DESCTYPE_S, 0x8);
19308- gdt.s = 1;
ae4e228f
MT
19309+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19310+ 0x83 | DESCTYPE_S, 0xC);
19311 write_gdt_entry(get_cpu_gdt_table(cpu),
19312 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
58c5fc13 19313 #endif
5e856224 19314@@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void)
58c5fc13
MT
19315 /* alrighty, percpu areas up and running */
19316 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19317 for_each_possible_cpu(cpu) {
19318+#ifdef CONFIG_CC_STACKPROTECTOR
15a11c5b
MT
19319+#ifdef CONFIG_X86_32
19320+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
58c5fc13
MT
19321+#endif
19322+#endif
ae4e228f 19323 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
58c5fc13
MT
19324 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19325 per_cpu(cpu_number, cpu) = cpu;
5e856224 19326@@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void)
66a7e928 19327 */
57199397 19328 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
58c5fc13 19329 #endif
58c5fc13 19330+#ifdef CONFIG_CC_STACKPROTECTOR
15a11c5b
MT
19331+#ifdef CONFIG_X86_32
19332+ if (!cpu)
19333+ per_cpu(stack_canary.canary, cpu) = canary;
58c5fc13
MT
19334+#endif
19335+#endif
19336 /*
57199397 19337 * Up to this point, the boot CPU has been using .init.data
58c5fc13 19338 * area. Reload any changed state for the boot CPU.
fe2de317 19339diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
5e856224 19340index 46a01bd..2e88e6d 100644
fe2de317
MT
19341--- a/arch/x86/kernel/signal.c
19342+++ b/arch/x86/kernel/signal.c
19343@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
58c5fc13
MT
19344 * Align the stack pointer according to the i386 ABI,
19345 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19346 */
19347- sp = ((sp + 4) & -16ul) - 4;
19348+ sp = ((sp - 12) & -16ul) - 4;
19349 #else /* !CONFIG_X86_32 */
19350 sp = round_down(sp, 16) - 8;
19351 #endif
fe2de317 19352@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
ae4e228f
MT
19353 * Return an always-bogus address instead so we will die with SIGSEGV.
19354 */
19355 if (onsigstack && !likely(on_sig_stack(sp)))
19356- return (void __user *)-1L;
19357+ return (__force void __user *)-1L;
19358
19359 /* save i387 state */
19360 if (used_math() && save_i387_xstate(*fpstate) < 0)
19361- return (void __user *)-1L;
19362+ return (__force void __user *)-1L;
19363
19364 return (void __user *)sp;
19365 }
fe2de317 19366@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
58c5fc13
MT
19367 }
19368
19369 if (current->mm->context.vdso)
19370- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
ae4e228f 19371+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
58c5fc13
MT
19372 else
19373- restorer = &frame->retcode;
19374+ restorer = (void __user *)&frame->retcode;
19375 if (ka->sa.sa_flags & SA_RESTORER)
19376 restorer = ka->sa.sa_restorer;
19377
fe2de317 19378@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
ae4e228f
MT
19379 * reasons and because gdb uses it as a signature to notice
19380 * signal handler stack frames.
19381 */
19382- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19383+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19384
19385 if (err)
19386 return -EFAULT;
fe2de317 19387@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
58c5fc13
MT
19388 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19389
19390 /* Set up to return from userspace. */
19391- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6892158b
MT
19392+ if (current->mm->context.vdso)
19393+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19394+ else
19395+ restorer = (void __user *)&frame->retcode;
58c5fc13
MT
19396 if (ka->sa.sa_flags & SA_RESTORER)
19397 restorer = ka->sa.sa_restorer;
19398 put_user_ex(restorer, &frame->pretcode);
fe2de317 19399@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
ae4e228f
MT
19400 * reasons and because gdb uses it as a signature to notice
19401 * signal handler stack frames.
19402 */
19403- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19404+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19405 } put_user_catch(err);
19406
19407 if (err)
5e856224 19408@@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
58c5fc13
MT
19409 * X86_32: vm86 regs switched out by assembly code before reaching
19410 * here, so testing against kernel CS suffices.
19411 */
19412- if (!user_mode(regs))
19413+ if (!user_mode_novm(regs))
19414 return;
19415
6e9df6a3 19416 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
fe2de317 19417diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
5e856224 19418index 66d250c..f1b10bd 100644
fe2de317
MT
19419--- a/arch/x86/kernel/smpboot.c
19420+++ b/arch/x86/kernel/smpboot.c
5e856224 19421@@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
71d190be
MT
19422 set_idle_for_cpu(cpu, c_idle.idle);
19423 do_rest:
19424 per_cpu(current_task, cpu) = c_idle.idle;
19425+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19426 #ifdef CONFIG_X86_32
19427 /* Stack for startup_32 can be just as for start_secondary onwards */
19428 irq_ctx_init(cpu);
19429 #else
19430 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19431 initial_gs = per_cpu_offset(cpu);
19432- per_cpu(kernel_stack, cpu) =
19433- (unsigned long)task_stack_page(c_idle.idle) -
19434- KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 19435+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
58c5fc13
MT
19436 #endif
19437+
ae4e228f 19438+ pax_open_kernel();
58c5fc13 19439 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
ae4e228f 19440+ pax_close_kernel();
58c5fc13
MT
19441+
19442 initial_code = (unsigned long)start_secondary;
16454cff 19443 stack_start = c_idle.idle->thread.sp;
58c5fc13 19444
5e856224 19445@@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
df50ba0c
MT
19446
19447 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19448
19449+#ifdef CONFIG_PAX_PER_CPU_PGD
19450+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19451+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19452+ KERNEL_PGD_PTRS);
19453+#endif
19454+
6892158b 19455 err = do_boot_cpu(apicid, cpu);
6892158b 19456 if (err) {
bc901d79 19457 pr_debug("do_boot_cpu failed %d\n", err);
fe2de317
MT
19458diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19459index c346d11..d43b163 100644
19460--- a/arch/x86/kernel/step.c
19461+++ b/arch/x86/kernel/step.c
19462@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
ae4e228f 19463 struct desc_struct *desc;
58c5fc13
MT
19464 unsigned long base;
19465
19466- seg &= ~7UL;
19467+ seg >>= 3;
19468
19469 mutex_lock(&child->mm->context.lock);
19470- if (unlikely((seg >> 3) >= child->mm->context.size))
58c5fc13 19471+ if (unlikely(seg >= child->mm->context.size))
ae4e228f 19472 addr = -1L; /* bogus selector, access would fault */
58c5fc13 19473 else {
ae4e228f 19474 desc = child->mm->context.ldt + seg;
fe2de317 19475@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
bc901d79
MT
19476 addr += base;
19477 }
19478 mutex_unlock(&child->mm->context.lock);
19479- }
19480+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19481+ addr = ktla_ktva(addr);
19482
19483 return addr;
19484 }
fe2de317 19485@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
58c5fc13
MT
19486 unsigned char opcode[15];
19487 unsigned long addr = convert_ip_to_linear(child, regs);
19488
19489+ if (addr == -EINVAL)
19490+ return 0;
19491+
19492 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19493 for (i = 0; i < copied; i++) {
19494 switch (opcode[i]) {
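
In convert_ip_to_linear() above, the selector is now shifted right by three instead of having its low bits masked, so seg becomes a plain descriptor index that can be bounds-checked against context.size and used to index the LDT array directly; kernel code selectors are additionally translated through ktla_ktva() under KERNEXEC. For reference, a short C illustration of how an x86 selector decomposes (the value is a made-up example):

/* Sketch: x86 segment selector layout, index in bits 15..3, table indicator
 * (0 = GDT, 1 = LDT) in bit 2, requested privilege level in bits 1..0. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t selector = 0x077;              /* example value only */

        unsigned index = selector >> 3;         /* descriptor index: 14 */
        unsigned ti    = (selector >> 2) & 1;   /* 1 -> LDT             */
        unsigned rpl   = selector & 3;          /* 3 -> user privilege  */

        printf("index=%u table=%s rpl=%u\n", index, ti ? "LDT" : "GDT", rpl);
        return 0;
}
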
fe2de317
MT
19495diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19496index 0b0cb5f..db6b9ed 100644
19497--- a/arch/x86/kernel/sys_i386_32.c
19498+++ b/arch/x86/kernel/sys_i386_32.c
bc901d79 19499@@ -24,17 +24,224 @@
58c5fc13
MT
19500
19501 #include <asm/syscalls.h>
19502
bc901d79
MT
19503-/*
19504- * Do a system call from kernel instead of calling sys_execve so we
19505- * end up with proper pt_regs.
19506- */
19507-int kernel_execve(const char *filename,
19508- const char *const argv[],
19509- const char *const envp[])
58c5fc13 19510+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
bc901d79
MT
19511 {
19512- long __res;
19513- asm volatile ("int $0x80"
19514- : "=a" (__res)
19515- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19516- return __res;
58c5fc13
MT
19517+ unsigned long pax_task_size = TASK_SIZE;
19518+
19519+#ifdef CONFIG_PAX_SEGMEXEC
19520+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19521+ pax_task_size = SEGMEXEC_TASK_SIZE;
19522+#endif
19523+
19524+ if (len > pax_task_size || addr > pax_task_size - len)
19525+ return -EINVAL;
19526+
19527+ return 0;
19528+}
19529+
58c5fc13
MT
19530+unsigned long
19531+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19532+ unsigned long len, unsigned long pgoff, unsigned long flags)
19533+{
19534+ struct mm_struct *mm = current->mm;
19535+ struct vm_area_struct *vma;
19536+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19537+
19538+#ifdef CONFIG_PAX_SEGMEXEC
19539+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19540+ pax_task_size = SEGMEXEC_TASK_SIZE;
19541+#endif
19542+
6892158b
MT
19543+ pax_task_size -= PAGE_SIZE;
19544+
58c5fc13
MT
19545+ if (len > pax_task_size)
19546+ return -ENOMEM;
19547+
19548+ if (flags & MAP_FIXED)
19549+ return addr;
19550+
19551+#ifdef CONFIG_PAX_RANDMMAP
19552+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19553+#endif
19554+
19555+ if (addr) {
19556+ addr = PAGE_ALIGN(addr);
57199397
MT
19557+ if (pax_task_size - len >= addr) {
19558+ vma = find_vma(mm, addr);
19559+ if (check_heap_stack_gap(vma, addr, len))
19560+ return addr;
19561+ }
58c5fc13
MT
19562+ }
19563+ if (len > mm->cached_hole_size) {
19564+ start_addr = addr = mm->free_area_cache;
19565+ } else {
19566+ start_addr = addr = mm->mmap_base;
19567+ mm->cached_hole_size = 0;
19568+ }
19569+
19570+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 19571+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
58c5fc13
MT
19572+ start_addr = 0x00110000UL;
19573+
19574+#ifdef CONFIG_PAX_RANDMMAP
19575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19576+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19577+#endif
19578+
19579+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19580+ start_addr = addr = mm->mmap_base;
19581+ else
19582+ addr = start_addr;
19583+ }
19584+#endif
19585+
19586+full_search:
19587+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19588+ /* At this point: (!vma || addr < vma->vm_end). */
19589+ if (pax_task_size - len < addr) {
19590+ /*
19591+ * Start a new search - just in case we missed
19592+ * some holes.
19593+ */
19594+ if (start_addr != mm->mmap_base) {
19595+ start_addr = addr = mm->mmap_base;
19596+ mm->cached_hole_size = 0;
19597+ goto full_search;
19598+ }
19599+ return -ENOMEM;
19600+ }
57199397
MT
19601+ if (check_heap_stack_gap(vma, addr, len))
19602+ break;
58c5fc13
MT
19603+ if (addr + mm->cached_hole_size < vma->vm_start)
19604+ mm->cached_hole_size = vma->vm_start - addr;
19605+ addr = vma->vm_end;
19606+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19607+ start_addr = addr = mm->mmap_base;
19608+ mm->cached_hole_size = 0;
19609+ goto full_search;
19610+ }
19611+ }
57199397
MT
19612+
19613+ /*
19614+ * Remember the place where we stopped the search:
19615+ */
19616+ mm->free_area_cache = addr + len;
19617+ return addr;
58c5fc13
MT
19618+}
19619+
19620+unsigned long
19621+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19622+ const unsigned long len, const unsigned long pgoff,
19623+ const unsigned long flags)
19624+{
19625+ struct vm_area_struct *vma;
19626+ struct mm_struct *mm = current->mm;
19627+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19628+
19629+#ifdef CONFIG_PAX_SEGMEXEC
19630+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19631+ pax_task_size = SEGMEXEC_TASK_SIZE;
19632+#endif
19633+
6892158b
MT
19634+ pax_task_size -= PAGE_SIZE;
19635+
58c5fc13
MT
19636+ /* requested length too big for entire address space */
19637+ if (len > pax_task_size)
19638+ return -ENOMEM;
19639+
19640+ if (flags & MAP_FIXED)
19641+ return addr;
19642+
19643+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 19644+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
58c5fc13
MT
19645+ goto bottomup;
19646+#endif
19647+
19648+#ifdef CONFIG_PAX_RANDMMAP
19649+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19650+#endif
19651+
19652+ /* requesting a specific address */
19653+ if (addr) {
19654+ addr = PAGE_ALIGN(addr);
57199397
MT
19655+ if (pax_task_size - len >= addr) {
19656+ vma = find_vma(mm, addr);
19657+ if (check_heap_stack_gap(vma, addr, len))
19658+ return addr;
19659+ }
58c5fc13
MT
19660+ }
19661+
19662+ /* check if free_area_cache is useful for us */
19663+ if (len <= mm->cached_hole_size) {
19664+ mm->cached_hole_size = 0;
19665+ mm->free_area_cache = mm->mmap_base;
19666+ }
19667+
19668+ /* either no address requested or can't fit in requested address hole */
19669+ addr = mm->free_area_cache;
19670+
19671+ /* make sure it can fit in the remaining address space */
19672+ if (addr > len) {
19673+ vma = find_vma(mm, addr-len);
57199397 19674+ if (check_heap_stack_gap(vma, addr - len, len))
58c5fc13
MT
19675+ /* remember the address as a hint for next time */
19676+ return (mm->free_area_cache = addr-len);
19677+ }
19678+
19679+ if (mm->mmap_base < len)
19680+ goto bottomup;
19681+
19682+ addr = mm->mmap_base-len;
19683+
19684+ do {
19685+ /*
19686+ * Lookup failure means no vma is above this address,
19687+ * else if new region fits below vma->vm_start,
19688+ * return with success:
19689+ */
19690+ vma = find_vma(mm, addr);
57199397 19691+ if (check_heap_stack_gap(vma, addr, len))
58c5fc13
MT
19692+ /* remember the address as a hint for next time */
19693+ return (mm->free_area_cache = addr);
19694+
19695+ /* remember the largest hole we saw so far */
19696+ if (addr + mm->cached_hole_size < vma->vm_start)
19697+ mm->cached_hole_size = vma->vm_start - addr;
19698+
19699+ /* try just below the current vma->vm_start */
16454cff
MT
19700+ addr = skip_heap_stack_gap(vma, len);
19701+ } while (!IS_ERR_VALUE(addr));
58c5fc13
MT
19702+
19703+bottomup:
19704+ /*
19705+ * A failed mmap() very likely causes application failure,
19706+ * so fall back to the bottom-up function here. This scenario
19707+ * can happen with large stack limits and large mmap()
19708+ * allocations.
19709+ */
19710+
19711+#ifdef CONFIG_PAX_SEGMEXEC
19712+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19713+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19714+ else
19715+#endif
19716+
19717+ mm->mmap_base = TASK_UNMAPPED_BASE;
19718+
19719+#ifdef CONFIG_PAX_RANDMMAP
19720+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19721+ mm->mmap_base += mm->delta_mmap;
19722+#endif
19723+
19724+ mm->free_area_cache = mm->mmap_base;
19725+ mm->cached_hole_size = ~0UL;
19726+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19727+ /*
19728+ * Restore the topdown base:
19729+ */
19730+ mm->mmap_base = base;
19731+ mm->free_area_cache = base;
19732+ mm->cached_hole_size = ~0UL;
19733+
19734+ return addr;
bc901d79 19735 }
fe2de317 19736diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
4c928ab7 19737index 0514890..3dbebce 100644
fe2de317
MT
19738--- a/arch/x86/kernel/sys_x86_64.c
19739+++ b/arch/x86/kernel/sys_x86_64.c
4c928ab7 19740@@ -95,8 +95,8 @@ out:
58c5fc13
MT
19741 return error;
19742 }
19743
19744-static void find_start_end(unsigned long flags, unsigned long *begin,
19745- unsigned long *end)
19746+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19747+ unsigned long *begin, unsigned long *end)
19748 {
19749 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19750 unsigned long new_begin;
4c928ab7 19751@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
58c5fc13
MT
19752 *begin = new_begin;
19753 }
19754 } else {
19755- *begin = TASK_UNMAPPED_BASE;
19756+ *begin = mm->mmap_base;
19757 *end = TASK_SIZE;
19758 }
19759 }
4c928ab7 19760@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
58c5fc13
MT
19761 if (flags & MAP_FIXED)
19762 return addr;
19763
19764- find_start_end(flags, &begin, &end);
19765+ find_start_end(mm, flags, &begin, &end);
19766
19767 if (len > end)
19768 return -ENOMEM;
19769
19770+#ifdef CONFIG_PAX_RANDMMAP
19771+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19772+#endif
19773+
19774 if (addr) {
19775 addr = PAGE_ALIGN(addr);
19776 vma = find_vma(mm, addr);
57199397
MT
19777- if (end - len >= addr &&
19778- (!vma || addr + len <= vma->vm_start))
19779+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19780 return addr;
19781 }
19782 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
4c928ab7 19783@@ -172,7 +175,7 @@ full_search:
57199397
MT
19784 }
19785 return -ENOMEM;
19786 }
19787- if (!vma || addr + len <= vma->vm_start) {
19788+ if (check_heap_stack_gap(vma, addr, len)) {
19789 /*
19790 * Remember the place where we stopped the search:
19791 */
4c928ab7 19792@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
58c5fc13
MT
19793 {
19794 struct vm_area_struct *vma;
19795 struct mm_struct *mm = current->mm;
19796- unsigned long addr = addr0;
19797+ unsigned long base = mm->mmap_base, addr = addr0;
19798
19799 /* requested length too big for entire address space */
19800 if (len > TASK_SIZE)
4c928ab7 19801@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
58c5fc13
MT
19802 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19803 goto bottomup;
19804
19805+#ifdef CONFIG_PAX_RANDMMAP
19806+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19807+#endif
19808+
19809 /* requesting a specific address */
19810 if (addr) {
19811 addr = PAGE_ALIGN(addr);
16454cff 19812- vma = find_vma(mm, addr);
57199397
MT
19813- if (TASK_SIZE - len >= addr &&
19814- (!vma || addr + len <= vma->vm_start))
16454cff
MT
19815- return addr;
19816+ if (TASK_SIZE - len >= addr) {
19817+ vma = find_vma(mm, addr);
19818+ if (check_heap_stack_gap(vma, addr, len))
19819+ return addr;
19820+ }
57199397
MT
19821 }
19822
16454cff 19823 /* check if free_area_cache is useful for us */
4c928ab7
MT
19824@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19825 ALIGN_TOPDOWN);
19826
19827 vma = find_vma(mm, tmp_addr);
19828- if (!vma || tmp_addr + len <= vma->vm_start)
19829+ if (check_heap_stack_gap(vma, tmp_addr, len))
57199397 19830 /* remember the address as a hint for next time */
4c928ab7 19831 return mm->free_area_cache = tmp_addr;
57199397 19832 }
4c928ab7 19833@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
57199397
MT
19834 * return with success:
19835 */
19836 vma = find_vma(mm, addr);
19837- if (!vma || addr+len <= vma->vm_start)
19838+ if (check_heap_stack_gap(vma, addr, len))
19839 /* remember the address as a hint for next time */
19840 return mm->free_area_cache = addr;
19841
4c928ab7 19842@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16454cff
MT
19843 mm->cached_hole_size = vma->vm_start - addr;
19844
19845 /* try just below the current vma->vm_start */
19846- addr = vma->vm_start-len;
19847- } while (len < vma->vm_start);
19848+ addr = skip_heap_stack_gap(vma, len);
19849+ } while (!IS_ERR_VALUE(addr));
19850
19851 bottomup:
19852 /*
4c928ab7 19853@@ -270,13 +278,21 @@ bottomup:
58c5fc13
MT
19854 * can happen with large stack limits and large mmap()
19855 * allocations.
19856 */
19857+ mm->mmap_base = TASK_UNMAPPED_BASE;
19858+
19859+#ifdef CONFIG_PAX_RANDMMAP
19860+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19861+ mm->mmap_base += mm->delta_mmap;
19862+#endif
19863+
19864+ mm->free_area_cache = mm->mmap_base;
19865 mm->cached_hole_size = ~0UL;
19866- mm->free_area_cache = TASK_UNMAPPED_BASE;
19867 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868 /*
19869 * Restore the topdown base:
19870 */
19871- mm->free_area_cache = mm->mmap_base;
19872+ mm->mmap_base = base;
19873+ mm->free_area_cache = base;
19874 mm->cached_hole_size = ~0UL;
19875
19876 return addr;
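
The 32-bit and 64-bit mmap placement hunks above replace the plain "!vma || addr + len <= vma->vm_start" hole test with check_heap_stack_gap() and skip_heap_stack_gap(), which are defined elsewhere in this patch: a hole only counts as usable if the mapping would also clear a guard gap below a downward-growing (stack) VMA. A simplified stand-alone model of that test follows; the struct, the gap constant and the VM_GROWSDOWN-only handling are illustrative assumptions, not the patch's exact helper.

/* Simplified model of the "does this hole really fit?" test.  The real
 * check_heap_stack_gap() uses the heap_stack_gap sysctl (in pages) and also
 * handles upward-growing segments; this sketch keeps only the core idea. */
#include <stdbool.h>

#define HEAP_STACK_GAP  (64UL * 1024)   /* illustrative guard size in bytes */
#define VM_GROWSDOWN    0x0100UL

struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_flags;
};

static bool gap_ok(const struct vma_stub *next, unsigned long addr, unsigned long len)
{
        if (!next)                              /* nothing mapped above us */
                return true;
        if (next->vm_flags & VM_GROWSDOWN)      /* keep a guard below stacks */
                return addr + len + HEAP_STACK_GAP <= next->vm_start;
        return addr + len <= next->vm_start;
}

int main(void)
{
        struct vma_stub stack = { .vm_start = 0x7fff0000UL, .vm_flags = VM_GROWSDOWN };

        return gap_ok(&stack, 0x7ffc0000UL, 0x8000) ? 0 : 1;   /* 0: fits, guard kept */
}
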
fe2de317 19877diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
4c928ab7 19878index e2410e2..4fe3fbc 100644
fe2de317
MT
19879--- a/arch/x86/kernel/tboot.c
19880+++ b/arch/x86/kernel/tboot.c
4c928ab7 19881@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
66a7e928
MT
19882
19883 void tboot_shutdown(u32 shutdown_type)
19884 {
19885- void (*shutdown)(void);
19886+ void (* __noreturn shutdown)(void);
19887
19888 if (!tboot_enabled())
19889 return;
4c928ab7 19890@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
66a7e928
MT
19891
19892 switch_to_tboot_pt();
19893
19894- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19895+ shutdown = (void *)tboot->shutdown_entry;
19896 shutdown();
19897
19898 /* should not reach here */
4c928ab7 19899@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
8308f9c9
MT
19900 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19901 }
19902
19903-static atomic_t ap_wfs_count;
19904+static atomic_unchecked_t ap_wfs_count;
19905
19906 static int tboot_wait_for_aps(int num_aps)
19907 {
4c928ab7 19908@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
8308f9c9
MT
19909 {
19910 switch (action) {
19911 case CPU_DYING:
19912- atomic_inc(&ap_wfs_count);
19913+ atomic_inc_unchecked(&ap_wfs_count);
19914 if (num_online_cpus() == 1)
19915- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19916+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19917 return NOTIFY_BAD;
19918 break;
19919 }
4c928ab7 19920@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
8308f9c9
MT
19921
19922 tboot_create_trampoline();
19923
19924- atomic_set(&ap_wfs_count, 0);
19925+ atomic_set_unchecked(&ap_wfs_count, 0);
19926 register_hotcpu_notifier(&tboot_cpu_notifier);
19927 return 0;
19928 }
fe2de317 19929diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
4c928ab7 19930index dd5fbf4..b7f2232 100644
fe2de317
MT
19931--- a/arch/x86/kernel/time.c
19932+++ b/arch/x86/kernel/time.c
4c928ab7 19933@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
ae4e228f 19934 {
58c5fc13
MT
19935 unsigned long pc = instruction_pointer(regs);
19936
58c5fc13
MT
19937- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19938+ if (!user_mode(regs) && in_lock_functions(pc)) {
19939 #ifdef CONFIG_FRAME_POINTER
19940- return *(unsigned long *)(regs->bp + sizeof(long));
19941+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19942 #else
ae4e228f
MT
19943 unsigned long *sp =
19944 (unsigned long *)kernel_stack_pointer(regs);
4c928ab7 19945@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
ae4e228f
MT
19946 * or above a saved flags. Eflags has bits 22-31 zero,
19947 * kernel addresses don't.
19948 */
58c5fc13
MT
19949+
19950+#ifdef CONFIG_PAX_KERNEXEC
19951+ return ktla_ktva(sp[0]);
19952+#else
19953 if (sp[0] >> 22)
19954 return sp[0];
19955 if (sp[1] >> 22)
19956 return sp[1];
19957 #endif
19958+
19959+#endif
19960 }
58c5fc13
MT
19961 return pc;
19962 }
fe2de317 19963diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
5e856224 19964index bcfec2d..8f88b4a 100644
fe2de317
MT
19965--- a/arch/x86/kernel/tls.c
19966+++ b/arch/x86/kernel/tls.c
19967@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
58c5fc13
MT
19968 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19969 return -EINVAL;
19970
19971+#ifdef CONFIG_PAX_SEGMEXEC
19972+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19973+ return -EINVAL;
19974+#endif
19975+
19976 set_tls_desc(p, idx, &info, 1);
19977
19978 return 0;
4c928ab7
MT
19979diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
19980index 2f083a2..7d3fecc 100644
19981--- a/arch/x86/kernel/tls.h
19982+++ b/arch/x86/kernel/tls.h
19983@@ -16,6 +16,6 @@
19984
19985 extern user_regset_active_fn regset_tls_active;
19986 extern user_regset_get_fn regset_tls_get;
19987-extern user_regset_set_fn regset_tls_set;
19988+extern user_regset_set_fn regset_tls_set __size_overflow(4);
19989
19990 #endif /* _ARCH_X86_KERNEL_TLS_H */
fe2de317
MT
19991diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19992index 451c0a7..e57f551 100644
19993--- a/arch/x86/kernel/trampoline_32.S
19994+++ b/arch/x86/kernel/trampoline_32.S
ae4e228f
MT
19995@@ -32,6 +32,12 @@
19996 #include <asm/segment.h>
19997 #include <asm/page_types.h>
19998
19999+#ifdef CONFIG_PAX_KERNEXEC
20000+#define ta(X) (X)
20001+#else
20002+#define ta(X) ((X) - __PAGE_OFFSET)
20003+#endif
20004+
66a7e928
MT
20005 #ifdef CONFIG_SMP
20006
20007 .section ".x86_trampoline","a"
20008@@ -62,7 +68,7 @@ r_base = .
ae4e228f
MT
20009 inc %ax # protected mode (PE) bit
20010 lmsw %ax # into protected mode
20011 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20012- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20013+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
20014
20015 # These need to be in the same 64K segment as the above;
20016 # hence we don't use the boot_gdt_descr defined in head.S
fe2de317
MT
20017diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20018index 09ff517..df19fbff 100644
20019--- a/arch/x86/kernel/trampoline_64.S
20020+++ b/arch/x86/kernel/trampoline_64.S
66a7e928 20021@@ -90,7 +90,7 @@ startup_32:
6892158b
MT
20022 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20023 movl %eax, %ds
20024
20025- movl $X86_CR4_PAE, %eax
20026+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20027 movl %eax, %cr4 # Enable PAE mode
20028
20029 # Setup trampoline 4 level pagetables
20030@@ -138,7 +138,7 @@ tidt:
20031 # so the kernel can live anywhere
20032 .balign 4
20033 tgdt:
20034- .short tgdt_end - tgdt # gdt limit
20035+ .short tgdt_end - tgdt - 1 # gdt limit
20036 .long tgdt - r_base
20037 .short 0
20038 .quad 0x00cf9b000000ffff # __KERNEL32_CS
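
The trampoline GDT fix above (tgdt_end - tgdt - 1) reflects how the hardware reads the pseudo-descriptor: the limit field loaded by lgdt is the offset of the last valid byte of the table, i.e. its size minus one, so the unadjusted expression overstated the limit by one byte. A small C sketch of that layout with simplified stand-in types, not the kernel's struct:

/* Sketch: the pseudo-descriptor handed to LGDT.  limit = sizeof(table) - 1. */
#include <stdint.h>
#include <stdio.h>

struct gdt_ptr {
        uint16_t limit;         /* offset of the last valid byte */
        uint64_t base;          /* linear address of the table   */
} __attribute__((packed));

static uint64_t tgdt[3];        /* three 8-byte descriptors, as in the trampoline */

int main(void)
{
        struct gdt_ptr descr = { .limit = sizeof(tgdt) - 1, .base = (uintptr_t)tgdt };

        printf("limit=%u (table is %zu bytes)\n", descr.limit, sizeof(tgdt)); /* 23, 24 */
        return 0;
}
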
fe2de317 20039diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
5e856224 20040index 4bbe04d..41d0943 100644
fe2de317
MT
20041--- a/arch/x86/kernel/traps.c
20042+++ b/arch/x86/kernel/traps.c
57199397 20043@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
58c5fc13
MT
20044
20045 /* Do we ignore FPU interrupts ? */
20046 char ignore_fpu_irq;
20047-
20048-/*
20049- * The IDT has to be page-aligned to simplify the Pentium
ae4e228f 20050- * F0 0F bug workaround.
58c5fc13 20051- */
ae4e228f 20052-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
58c5fc13
MT
20053 #endif
20054
20055 DECLARE_BITMAP(used_vectors, NR_VECTORS);
4c928ab7 20056@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
58c5fc13 20057 }
ae4e228f
MT
20058
20059 static void __kprobes
20060-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20061+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20062 long error_code, siginfo_t *info)
20063 {
58c5fc13
MT
20064 struct task_struct *tsk = current;
20065
20066 #ifdef CONFIG_X86_32
20067- if (regs->flags & X86_VM_MASK) {
20068+ if (v8086_mode(regs)) {
20069 /*
20070 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20071 * On nmi (interrupt 2), do_trap should not be called.
4c928ab7 20072@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
58c5fc13
MT
20073 }
20074 #endif
20075
20076- if (!user_mode(regs))
20077+ if (!user_mode_novm(regs))
20078 goto kernel_trap;
20079
20080 #ifdef CONFIG_X86_32
4c928ab7 20081@@ -148,7 +142,7 @@ trap_signal:
58c5fc13
MT
20082 printk_ratelimit()) {
20083 printk(KERN_INFO
20084 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20085- tsk->comm, tsk->pid, str,
20086+ tsk->comm, task_pid_nr(tsk), str,
20087 regs->ip, regs->sp, error_code);
20088 print_vma_addr(" in ", regs->ip);
20089 printk("\n");
4c928ab7 20090@@ -165,8 +159,20 @@ kernel_trap:
ae4e228f
MT
20091 if (!fixup_exception(regs)) {
20092 tsk->thread.error_code = error_code;
58c5fc13 20093 tsk->thread.trap_no = trapnr;
ae4e228f
MT
20094+
20095+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20096+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20097+ str = "PAX: suspicious stack segment fault";
20098+#endif
20099+
58c5fc13
MT
20100 die(str, regs, error_code);
20101 }
20102+
20103+#ifdef CONFIG_PAX_REFCOUNT
20104+ if (trapnr == 4)
20105+ pax_report_refcount_overflow(regs);
20106+#endif
20107+
20108 return;
20109
20110 #ifdef CONFIG_X86_32
4c928ab7 20111@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
58c5fc13
MT
20112 conditional_sti(regs);
20113
20114 #ifdef CONFIG_X86_32
20115- if (regs->flags & X86_VM_MASK)
20116+ if (v8086_mode(regs))
20117 goto gp_in_vm86;
20118 #endif
20119
20120 tsk = current;
20121- if (!user_mode(regs))
20122+ if (!user_mode_novm(regs))
20123 goto gp_in_kernel;
20124
20125+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
ae4e228f 20126+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
58c5fc13
MT
20127+ struct mm_struct *mm = tsk->mm;
20128+ unsigned long limit;
20129+
20130+ down_write(&mm->mmap_sem);
20131+ limit = mm->context.user_cs_limit;
20132+ if (limit < TASK_SIZE) {
20133+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20134+ up_write(&mm->mmap_sem);
20135+ return;
20136+ }
20137+ up_write(&mm->mmap_sem);
20138+ }
20139+#endif
20140+
20141 tsk->thread.error_code = error_code;
20142 tsk->thread.trap_no = 13;
20143
4c928ab7 20144@@ -295,6 +317,13 @@ gp_in_kernel:
58c5fc13
MT
20145 if (notify_die(DIE_GPF, "general protection fault", regs,
20146 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20147 return;
20148+
20149+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
ae4e228f 20150+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
58c5fc13
MT
20151+ die("PAX: suspicious general protection fault", regs, error_code);
20152+ else
20153+#endif
20154+
20155 die("general protection fault", regs, error_code);
20156 }
20157
5e856224 20158@@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
ae4e228f
MT
20159 /* It's safe to allow irq's after DR6 has been saved */
20160 preempt_conditional_sti(regs);
58c5fc13 20161
ae4e228f
MT
20162- if (regs->flags & X86_VM_MASK) {
20163+ if (v8086_mode(regs)) {
20164 handle_vm86_trap((struct kernel_vm86_regs *) regs,
20165 error_code, 1);
bc901d79 20166 preempt_conditional_cli(regs);
5e856224 20167@@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
ae4e228f
MT
20168 * We already checked v86 mode above, so we can check for kernel mode
20169 * by just checking the CPL of CS.
58c5fc13 20170 */
ae4e228f
MT
20171- if ((dr6 & DR_STEP) && !user_mode(regs)) {
20172+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
20173 tsk->thread.debugreg6 &= ~DR_STEP;
20174 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
20175 regs->flags &= ~X86_EFLAGS_TF;
5e856224 20176@@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
58c5fc13 20177 return;
57199397
MT
20178 conditional_sti(regs);
20179
20180- if (!user_mode_vm(regs))
20181+ if (!user_mode(regs))
20182 {
20183 if (!fixup_exception(regs)) {
20184 task->thread.error_code = error_code;
fe2de317
MT
20185diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20186index b9242ba..50c5edd 100644
20187--- a/arch/x86/kernel/verify_cpu.S
20188+++ b/arch/x86/kernel/verify_cpu.S
15a11c5b
MT
20189@@ -20,6 +20,7 @@
20190 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20191 * arch/x86/kernel/trampoline_64.S: secondary processor verification
20192 * arch/x86/kernel/head_32.S: processor startup
20193+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20194 *
20195 * verify_cpu, returns the status of longmode and SSE in register %eax.
20196 * 0: Success 1: Failure
fe2de317 20197diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
5e856224 20198index 328cb37..f37fee1 100644
fe2de317
MT
20199--- a/arch/x86/kernel/vm86_32.c
20200+++ b/arch/x86/kernel/vm86_32.c
ae4e228f
MT
20201@@ -41,6 +41,7 @@
20202 #include <linux/ptrace.h>
20203 #include <linux/audit.h>
20204 #include <linux/stddef.h>
20205+#include <linux/grsecurity.h>
20206
20207 #include <asm/uaccess.h>
20208 #include <asm/io.h>
5e856224
MT
20209@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
20210 /* convert vm86_regs to kernel_vm86_regs */
20211 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
20212 const struct vm86_regs __user *user,
20213+ unsigned extra) __size_overflow(3);
20214+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
20215+ const struct vm86_regs __user *user,
20216 unsigned extra)
20217 {
20218 int ret = 0;
20219@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
58c5fc13
MT
20220 do_exit(SIGSEGV);
20221 }
20222
20223- tss = &per_cpu(init_tss, get_cpu());
20224+ tss = init_tss + get_cpu();
20225 current->thread.sp0 = current->thread.saved_sp0;
20226 current->thread.sysenter_cs = __KERNEL_CS;
20227 load_sp0(tss, &current->thread);
5e856224 20228@@ -210,6 +214,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
ae4e228f
MT
20229 struct task_struct *tsk;
20230 int tmp, ret = -EPERM;
20231
20232+#ifdef CONFIG_GRKERNSEC_VM86
20233+ if (!capable(CAP_SYS_RAWIO)) {
20234+ gr_handle_vm86();
20235+ goto out;
20236+ }
20237+#endif
20238+
20239 tsk = current;
20240 if (tsk->thread.saved_sp0)
20241 goto out;
5e856224 20242@@ -240,6 +251,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
ae4e228f
MT
20243 int tmp, ret;
20244 struct vm86plus_struct __user *v86;
20245
20246+#ifdef CONFIG_GRKERNSEC_VM86
20247+ if (!capable(CAP_SYS_RAWIO)) {
20248+ gr_handle_vm86();
20249+ ret = -EPERM;
20250+ goto out;
20251+ }
20252+#endif
20253+
20254 tsk = current;
20255 switch (cmd) {
20256 case VM86_REQUEST_IRQ:
5e856224 20257@@ -326,7 +345,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
58c5fc13
MT
20258 tsk->thread.saved_fs = info->regs32->fs;
20259 tsk->thread.saved_gs = get_user_gs(info->regs32);
20260
20261- tss = &per_cpu(init_tss, get_cpu());
20262+ tss = init_tss + get_cpu();
20263 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20264 if (cpu_has_sep)
20265 tsk->thread.sysenter_cs = 0;
5e856224 20266@@ -533,7 +552,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
ae4e228f
MT
20267 goto cannot_handle;
20268 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20269 goto cannot_handle;
20270- intr_ptr = (unsigned long __user *) (i << 2);
20271+ intr_ptr = (__force unsigned long __user *) (i << 2);
20272 if (get_user(segoffs, intr_ptr))
20273 goto cannot_handle;
20274 if ((segoffs >> 16) == BIOSSEG)
fe2de317
MT
20275diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20276index 0f703f1..9e15f64 100644
20277--- a/arch/x86/kernel/vmlinux.lds.S
20278+++ b/arch/x86/kernel/vmlinux.lds.S
57199397 20279@@ -26,6 +26,13 @@
58c5fc13
MT
20280 #include <asm/page_types.h>
20281 #include <asm/cache.h>
20282 #include <asm/boot.h>
20283+#include <asm/segment.h>
20284+
58c5fc13
MT
20285+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20286+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20287+#else
20288+#define __KERNEL_TEXT_OFFSET 0
20289+#endif
20290
20291 #undef i386 /* in case the preprocessor is a 32bit one */
20292
6e9df6a3 20293@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
ae4e228f 20294
58c5fc13
MT
20295 PHDRS {
20296 text PT_LOAD FLAGS(5); /* R_E */
57199397
MT
20297+#ifdef CONFIG_X86_32
20298+ module PT_LOAD FLAGS(5); /* R_E */
20299+#endif
ae4e228f
MT
20300+#ifdef CONFIG_XEN
20301+ rodata PT_LOAD FLAGS(5); /* R_E */
20302+#else
58c5fc13 20303+ rodata PT_LOAD FLAGS(4); /* R__ */
ae4e228f 20304+#endif
16454cff 20305 data PT_LOAD FLAGS(6); /* RW_ */
6e9df6a3 20306-#ifdef CONFIG_X86_64
58c5fc13
MT
20307+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20308 #ifdef CONFIG_SMP
ae4e228f 20309 percpu PT_LOAD FLAGS(6); /* RW_ */
58c5fc13
MT
20310 #endif
20311+ text.init PT_LOAD FLAGS(5); /* R_E */
20312+ text.exit PT_LOAD FLAGS(5); /* R_E */
20313 init PT_LOAD FLAGS(7); /* RWE */
20314-#endif
20315 note PT_NOTE FLAGS(0); /* ___ */
20316 }
20317
20318 SECTIONS
20319 {
20320 #ifdef CONFIG_X86_32
20321- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20322- phys_startup_32 = startup_32 - LOAD_OFFSET;
20323+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20324 #else
20325- . = __START_KERNEL;
20326- phys_startup_64 = startup_64 - LOAD_OFFSET;
20327+ . = __START_KERNEL;
20328 #endif
20329
20330 /* Text and read-only data */
ae4e228f
MT
20331- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20332- _text = .;
58c5fc13 20333+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
ae4e228f 20334 /* bootstrapping code */
58c5fc13
MT
20335+#ifdef CONFIG_X86_32
20336+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20337+#else
20338+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20339+#endif
20340+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
ae4e228f
MT
20341+ _text = .;
20342 HEAD_TEXT
58c5fc13 20343 #ifdef CONFIG_X86_32
58c5fc13 20344 . = ALIGN(PAGE_SIZE);
6e9df6a3 20345@@ -108,13 +128,47 @@ SECTIONS
ae4e228f
MT
20346 IRQENTRY_TEXT
20347 *(.fixup)
20348 *(.gnu.warning)
20349- /* End of text section */
20350- _etext = .;
58c5fc13
MT
20351 } :text = 0x9090
20352
20353- NOTES :text :note
20354+ . += __KERNEL_TEXT_OFFSET;
fe2de317
MT
20355
20356- EXCEPTION_TABLE(16) :text = 0x9090
58c5fc13
MT
20357+#ifdef CONFIG_X86_32
20358+ . = ALIGN(PAGE_SIZE);
58c5fc13 20359+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
ae4e228f
MT
20360+
20361+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
58c5fc13
MT
20362+ MODULES_EXEC_VADDR = .;
20363+ BYTE(0)
ae4e228f 20364+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
57199397 20365+ . = ALIGN(HPAGE_SIZE);
58c5fc13 20366+ MODULES_EXEC_END = . - 1;
58c5fc13 20367+#endif
ae4e228f
MT
20368+
20369+ } :module
58c5fc13
MT
20370+#endif
20371+
57199397 20372+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
ae4e228f
MT
20373+ /* End of text section */
20374+ _etext = . - __KERNEL_TEXT_OFFSET;
57199397 20375+ }
15a11c5b 20376+
20377+#ifdef CONFIG_X86_32
20378+ . = ALIGN(PAGE_SIZE);
20379+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20380+ *(.idt)
20381+ . = ALIGN(PAGE_SIZE);
20382+ *(.empty_zero_page)
20383+ *(.initial_pg_fixmap)
20384+ *(.initial_pg_pmd)
20385+ *(.initial_page_table)
20386+ *(.swapper_pg_dir)
20387+ } :rodata
20388+#endif
20389+
20390+ . = ALIGN(PAGE_SIZE);
20391+ NOTES :rodata :note
fe2de317 20392+
20393+ EXCEPTION_TABLE(16) :rodata
20394
20395 #if defined(CONFIG_DEBUG_RODATA)
20396 /* .text should occupy whole number of pages */
6e9df6a3 20397@@ -126,16 +180,20 @@ SECTIONS
20398
20399 /* Data */
20400 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20401+
20402+#ifdef CONFIG_PAX_KERNEXEC
bc901d79 20403+ . = ALIGN(HPAGE_SIZE);
58c5fc13 20404+#else
bc901d79 20405+ . = ALIGN(PAGE_SIZE);
20406+#endif
20407+
20408 /* Start of data section */
20409 _sdata = .;
20410
20411 /* init_task */
20412 INIT_TASK_DATA(THREAD_SIZE)
20413
20414-#ifdef CONFIG_X86_32
20415- /* 32 bit has nosave before _edata */
20416 NOSAVE_DATA
20417-#endif
20418
20419 PAGE_ALIGNED_DATA(PAGE_SIZE)
ae4e228f 20420
6e9df6a3 20421@@ -176,12 +234,19 @@ SECTIONS
20422 #endif /* CONFIG_X86_64 */
20423
20424 /* Init code and data - will be freed after init */
20425- . = ALIGN(PAGE_SIZE);
20426 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20427+ BYTE(0)
20428+
20429+#ifdef CONFIG_PAX_KERNEXEC
57199397 20430+ . = ALIGN(HPAGE_SIZE);
20431+#else
20432+ . = ALIGN(PAGE_SIZE);
20433+#endif
20434+
20435 __init_begin = .; /* paired with __init_end */
20436- }
20437+ } :init.begin
20438
20439-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20440+#ifdef CONFIG_SMP
20441 /*
20442 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20443 * output PHDR, so the next output section - .init.text - should
6e9df6a3 20444@@ -190,12 +255,27 @@ SECTIONS
66a7e928 20445 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20446 #endif
20447
ae4e228f 20448- INIT_TEXT_SECTION(PAGE_SIZE)
20449-#ifdef CONFIG_X86_64
20450- :init
20451-#endif
20452+ . = ALIGN(PAGE_SIZE);
20453+ init_begin = .;
20454+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20455+ VMLINUX_SYMBOL(_sinittext) = .;
20456+ INIT_TEXT
20457+ VMLINUX_SYMBOL(_einittext) = .;
20458+ . = ALIGN(PAGE_SIZE);
58c5fc13 20459+ } :text.init
20460
20461- INIT_DATA_SECTION(16)
20462+ /*
20463+ * .exit.text is discard at runtime, not link time, to deal with
20464+ * references from .altinstructions and .eh_frame
20465+ */
57199397 20466+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20467+ EXIT_TEXT
20468+ . = ALIGN(16);
20469+ } :text.exit
20470+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
bc901d79 20471+
20472+ . = ALIGN(PAGE_SIZE);
20473+ INIT_DATA_SECTION(16) :init
58c5fc13 20474
20475 /*
20476 * Code and data for a variety of lowlevel trampolines, to be
6e9df6a3 20477@@ -269,19 +349,12 @@ SECTIONS
58c5fc13 20478 }
66a7e928 20479
bc901d79 20480 . = ALIGN(8);
20481- /*
20482- * .exit.text is discard at runtime, not link time, to deal with
20483- * references from .altinstructions and .eh_frame
20484- */
20485- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20486- EXIT_TEXT
20487- }
bc901d79 20488
20489 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20490 EXIT_DATA
20491 }
20492
20493-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20494+#ifndef CONFIG_SMP
15a11c5b 20495 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20496 #endif
20497
6e9df6a3 20498@@ -300,16 +373,10 @@ SECTIONS
20499 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20500 __smp_locks = .;
20501 *(.smp_locks)
20502- . = ALIGN(PAGE_SIZE);
20503 __smp_locks_end = .;
20504+ . = ALIGN(PAGE_SIZE);
20505 }
20506
20507-#ifdef CONFIG_X86_64
20508- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20509- NOSAVE_DATA
20510- }
20511-#endif
20512-
20513 /* BSS */
20514 . = ALIGN(PAGE_SIZE);
20515 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
6e9df6a3 20516@@ -325,6 +392,7 @@ SECTIONS
20517 __brk_base = .;
20518 . += 64 * 1024; /* 64k alignment slop space */
20519 *(.brk_reservation) /* areas brk users have reserved */
57199397 20520+ . = ALIGN(HPAGE_SIZE);
20521 __brk_limit = .;
20522 }
20523
6e9df6a3 20524@@ -351,13 +419,12 @@ SECTIONS
20525 * for the boot processor.
20526 */
df50ba0c 20527 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20528-INIT_PER_CPU(gdt_page);
20529 INIT_PER_CPU(irq_stack_union);
20530
20531 /*
20532 * Build-time check on the image size:
20533 */
20534-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20535+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20536 "kernel image bigger than KERNEL_IMAGE_SIZE");
20537
20538 #ifdef CONFIG_SMP
fe2de317 20539diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
5e856224 20540index b07ba93..a212969 100644
20541--- a/arch/x86/kernel/vsyscall_64.c
20542+++ b/arch/x86/kernel/vsyscall_64.c
4c928ab7 20543@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15a11c5b 20544 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
20545 };
20546
5e856224 20547-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20548+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20549
20550 static int __init vsyscall_setup(char *str)
20551 {
20552 if (str) {
20553 if (!strcmp("emulate", str))
20554 vsyscall_mode = EMULATE;
20555- else if (!strcmp("native", str))
20556- vsyscall_mode = NATIVE;
20557 else if (!strcmp("none", str))
20558 vsyscall_mode = NONE;
20559 else
5e856224 20560@@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20561
20562 tsk = current;
20563 if (seccomp_mode(&tsk->seccomp))
20564- do_exit(SIGKILL);
20565+ do_group_exit(SIGKILL);
20566
20567 /*
20568 * With a real vsyscall, page faults cause SIGSEGV. We want to
20569@@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20570 return true;
20571
20572 sigsegv:
20573- force_sig(SIGSEGV, current);
20574- return true;
20575+ do_group_exit(SIGKILL);
20576 }
20577
20578 /*
5e856224 20579@@ -333,10 +330,7 @@ void __init map_vsyscall(void)
20580 extern char __vvar_page;
20581 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20582
20583- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20584- vsyscall_mode == NATIVE
20585- ? PAGE_KERNEL_VSYSCALL
20586- : PAGE_KERNEL_VVAR);
20587+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20588 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20589 (unsigned long)VSYSCALL_START);
20590
20591diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20592index 9796c2f..f686fbf 100644
20593--- a/arch/x86/kernel/x8664_ksyms_64.c
20594+++ b/arch/x86/kernel/x8664_ksyms_64.c
20595@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20596 EXPORT_SYMBOL(copy_user_generic_string);
20597 EXPORT_SYMBOL(copy_user_generic_unrolled);
58c5fc13 20598 EXPORT_SYMBOL(__copy_user_nocache);
20599-EXPORT_SYMBOL(_copy_from_user);
20600-EXPORT_SYMBOL(_copy_to_user);
20601
20602 EXPORT_SYMBOL(copy_page);
ae4e228f 20603 EXPORT_SYMBOL(clear_page);
fe2de317 20604diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
4c928ab7 20605index 7110911..e8cdee5 100644
20606--- a/arch/x86/kernel/xsave.c
20607+++ b/arch/x86/kernel/xsave.c
20608@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
ae4e228f 20609 fx_sw_user->xstate_size > fx_sw_user->extended_size)
6892158b 20610 return -EINVAL;
20611
20612- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20613+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20614 fx_sw_user->extended_size -
20615 FP_XSTATE_MAGIC2_SIZE));
6892158b 20616 if (err)
4c928ab7 20617@@ -266,7 +266,7 @@ fx_only:
20618 * the other extended state.
20619 */
20620 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20621- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
6e9df6a3 20622+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20623 }
20624
20625 /*
4c928ab7 20626@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
57199397 20627 if (use_xsave())
20628 err = restore_user_xstate(buf);
20629 else
20630- err = fxrstor_checking((__force struct i387_fxsave_struct *)
6e9df6a3 20631+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20632 buf);
20633 if (unlikely(err)) {
20634 /*
20635diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20636index 89b02bf..0f6511d 100644
20637--- a/arch/x86/kvm/cpuid.c
20638+++ b/arch/x86/kvm/cpuid.c
20639@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20640 struct kvm_cpuid2 *cpuid,
20641 struct kvm_cpuid_entry2 __user *entries)
20642 {
20643- int r;
20644+ int r, i;
20645
20646 r = -E2BIG;
20647 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20648 goto out;
20649 r = -EFAULT;
20650- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20651- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20652+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20653 goto out;
20654+ for (i = 0; i < cpuid->nent; ++i) {
20655+ struct kvm_cpuid_entry2 cpuid_entry;
20656+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20657+ goto out;
20658+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20659+ }
20660 vcpu->arch.cpuid_nent = cpuid->nent;
20661 kvm_apic_set_version(vcpu);
20662 kvm_x86_ops->cpuid_update(vcpu);
20663@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20664 struct kvm_cpuid2 *cpuid,
20665 struct kvm_cpuid_entry2 __user *entries)
20666 {
20667- int r;
20668+ int r, i;
20669
20670 r = -E2BIG;
20671 if (cpuid->nent < vcpu->arch.cpuid_nent)
20672 goto out;
20673 r = -EFAULT;
20674- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20675- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20676+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20677 goto out;
20678+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20679+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20680+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20681+ goto out;
20682+ }
20683 return 0;
20684
20685 out:
fe2de317 20686diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
5e856224 20687index 0982507..7f6d72f 100644
20688--- a/arch/x86/kvm/emulate.c
20689+++ b/arch/x86/kvm/emulate.c
5e856224 20690@@ -250,6 +250,7 @@ struct gprefix {
20691
20692 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20693 do { \
20694+ unsigned long _tmp; \
20695 __asm__ __volatile__ ( \
20696 _PRE_EFLAGS("0", "4", "2") \
20697 _op _suffix " %"_x"3,%1; " \
5e856224 20698@@ -264,8 +265,6 @@ struct gprefix {
ae4e228f 20699 /* Raw emulation: instruction has two explicit operands. */
4c928ab7 20700 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20701 do { \
20702- unsigned long _tmp; \
20703- \
4c928ab7 20704 switch ((ctxt)->dst.bytes) { \
ae4e228f 20705 case 2: \
4c928ab7 20706 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
5e856224 20707@@ -281,7 +280,6 @@ struct gprefix {
ae4e228f 20708
4c928ab7 20709 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20710 do { \
20711- unsigned long _tmp; \
4c928ab7 20712 switch ((ctxt)->dst.bytes) { \
ae4e228f 20713 case 1: \
4c928ab7 20714 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
fe2de317 20715diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
5e856224 20716index cfdc6e0..ab92e84 100644
20717--- a/arch/x86/kvm/lapic.c
20718+++ b/arch/x86/kvm/lapic.c
5e856224 20719@@ -54,7 +54,7 @@
20720 #define APIC_BUS_CYCLE_NS 1
20721
20722 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20723-#define apic_debug(fmt, arg...)
20724+#define apic_debug(fmt, arg...) do {} while (0)
20725
20726 #define APIC_LVT_NUM 6
20727 /* 14 is the version for Xeon and Pentium 8.4.8*/
fe2de317 20728diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
5e856224 20729index 1561028..0ed7f14 100644
20730--- a/arch/x86/kvm/paging_tmpl.h
20731+++ b/arch/x86/kvm/paging_tmpl.h
20732@@ -197,7 +197,7 @@ retry_walk:
20733 if (unlikely(kvm_is_error_hva(host_addr)))
20734 goto error;
20735
20736- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20737+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20738 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20739 goto error;
20740
fe2de317 20741diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
5e856224 20742index e385214..f8df033 100644
20743--- a/arch/x86/kvm/svm.c
20744+++ b/arch/x86/kvm/svm.c
5e856224 20745@@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20746 int cpu = raw_smp_processor_id();
20747
ae4e228f 20748 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
58c5fc13 20749+
20750+ pax_open_kernel();
20751 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20752+ pax_close_kernel();
20753+
20754 load_TR_desc();
20755 }
20756
5e856224 20757@@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
66a7e928 20758 #endif
20759 #endif
20760
20761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20762+ __set_fs(current_thread_info()->addr_limit);
20763+#endif
20764+
20765 reload_tss(vcpu);
20766
20767 local_irq_disable();
fe2de317 20768diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
5e856224 20769index a7a6f60..04b745a 100644
20770--- a/arch/x86/kvm/vmx.c
20771+++ b/arch/x86/kvm/vmx.c
5e856224 20772@@ -1306,7 +1306,11 @@ static void reload_tss(void)
bc901d79 20773 struct desc_struct *descs;
58c5fc13 20774
bc901d79 20775 descs = (void *)gdt->address;
58c5fc13 20776+
ae4e228f 20777+ pax_open_kernel();
58c5fc13 20778 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
ae4e228f 20779+ pax_close_kernel();
20780+
20781 load_TR_desc();
20782 }
20783
5e856224 20784@@ -2637,8 +2641,11 @@ static __init int hardware_setup(void)
20785 if (!cpu_has_vmx_flexpriority())
20786 flexpriority_enabled = 0;
20787
20788- if (!cpu_has_vmx_tpr_shadow())
20789- kvm_x86_ops->update_cr8_intercept = NULL;
20790+ if (!cpu_has_vmx_tpr_shadow()) {
ae4e228f 20791+ pax_open_kernel();
58c5fc13 20792+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
ae4e228f 20793+ pax_close_kernel();
20794+ }
20795
20796 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20797 kvm_disable_largepages();
5e856224 20798@@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void)
57199397 20799 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
58c5fc13 20800
20801 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20802- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20803+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20804
20805 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20806 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
5e856224 20807@@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20808 "jmp .Lkvm_vmx_return \n\t"
20809 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20810 ".Lkvm_vmx_return: "
20811+
20812+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20813+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20814+ ".Lkvm_vmx_return2: "
20815+#endif
20816+
20817 /* Save guest registers, load host registers, keep flags */
20818 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20819 "pop %0 \n\t"
5e856224 20820@@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
58c5fc13 20821 #endif
20822 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20823 [wordsize]"i"(sizeof(ulong))
20824+
20825+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20826+ ,[cs]"i"(__KERNEL_CS)
20827+#endif
20828+
20829 : "cc", "memory"
bc901d79 20830 , R"ax", R"bx", R"di", R"si"
58c5fc13 20831 #ifdef CONFIG_X86_64
5e856224 20832@@ -6268,7 +6286,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20833 }
20834 }
20835
20836- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
6892158b 20837+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20838+
20839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8308f9c9 20840+ loadsegment(fs, __KERNEL_PERCPU);
20841+#endif
20842+
20843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20844+ __set_fs(current_thread_info()->addr_limit);
20845+#endif
20846+
6e9df6a3 20847 vmx->loaded_vmcs->launched = 1;
58c5fc13 20848
bc901d79 20849 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
fe2de317 20850diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
5e856224 20851index 8d1c6c6..6e6d611 100644
20852--- a/arch/x86/kvm/x86.c
20853+++ b/arch/x86/kvm/x86.c
5e856224 20854@@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
20855 return kvm_set_msr(vcpu, index, *data);
20856 }
20857
20858+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
20859 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
20860 {
20861 int version;
20862@@ -1307,12 +1308,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20863 return 0;
20864 }
20865
20866+static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) __size_overflow(2);
20867 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20868 {
20869 struct kvm *kvm = vcpu->kvm;
20870 int lm = is_long_mode(vcpu);
20871- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20872- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20873+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20874+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20875 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20876 : kvm->arch.xen_hvm_config.blob_size_32;
20877 u32 page_num = data & ~PAGE_MASK;
5e856224 20878@@ -2145,6 +2147,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20879 if (n < msr_list.nmsrs)
20880 goto out;
20881 r = -EFAULT;
20882+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20883+ goto out;
20884 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20885 num_msrs_to_save * sizeof(u32)))
20886 goto out;
5e856224 20887@@ -2266,7 +2270,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20888 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20889 struct kvm_interrupt *irq)
20890 {
20891- if (irq->irq < 0 || irq->irq >= 256)
20892+ if (irq->irq >= 256)
20893 return -EINVAL;
20894 if (irqchip_in_kernel(vcpu->kvm))
20895 return -ENXIO;
5e856224 20896@@ -3499,6 +3503,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
20897
20898 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20899 struct kvm_vcpu *vcpu, u32 access,
20900+ struct x86_exception *exception) __size_overflow(1,3);
20901+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20902+ struct kvm_vcpu *vcpu, u32 access,
20903 struct x86_exception *exception)
20904 {
20905 void *data = val;
5e856224 20906@@ -3530,6 +3537,9 @@ out:
20907 /* used for instruction fetching */
20908 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20909 gva_t addr, void *val, unsigned int bytes,
20910+ struct x86_exception *exception) __size_overflow(2,4);
20911+static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20912+ gva_t addr, void *val, unsigned int bytes,
20913 struct x86_exception *exception)
20914 {
20915 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5e856224 20916@@ -3554,6 +3564,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
20917
20918 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20919 gva_t addr, void *val, unsigned int bytes,
20920+ struct x86_exception *exception) __size_overflow(2,4);
20921+static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20922+ gva_t addr, void *val, unsigned int bytes,
20923 struct x86_exception *exception)
20924 {
20925 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5e856224 20926@@ -3667,12 +3680,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
20927 }
20928
20929 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20930+ void *val, int bytes) __size_overflow(2);
20931+static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20932 void *val, int bytes)
20933 {
20934 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
20935 }
20936
20937 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20938+ void *val, int bytes) __size_overflow(2);
20939+static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20940 void *val, int bytes)
20941 {
20942 return emulator_write_phys(vcpu, gpa, val, bytes);
5e856224 20943@@ -3823,6 +3840,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20944 const void *old,
20945 const void *new,
20946 unsigned int bytes,
20947+ struct x86_exception *exception) __size_overflow(5);
20948+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20949+ unsigned long addr,
20950+ const void *old,
20951+ const void *new,
20952+ unsigned int bytes,
20953 struct x86_exception *exception)
20954 {
20955 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5e856224 20956@@ -4782,7 +4805,7 @@ static void kvm_set_mmio_spte_mask(void)
6e9df6a3 20957 kvm_mmu_set_mmio_spte_mask(mask);
ae4e228f 20958 }
20959
20960-int kvm_arch_init(void *opaque)
20961+int kvm_arch_init(const void *opaque)
20962 {
ae4e228f 20963 int r;
15a11c5b 20964 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4c928ab7 20965diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
5e856224 20966index cb80c29..aeee86c 100644
20967--- a/arch/x86/kvm/x86.h
20968+++ b/arch/x86/kvm/x86.h
5e856224 20969@@ -116,11 +116,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
20970
20971 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
20972 gva_t addr, void *val, unsigned int bytes,
20973- struct x86_exception *exception);
20974+ struct x86_exception *exception) __size_overflow(2,4);
20975
20976 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20977 gva_t addr, void *val, unsigned int bytes,
20978- struct x86_exception *exception);
20979+ struct x86_exception *exception) __size_overflow(2,4);
20980
20981 extern u64 host_xcr0;
20982
fe2de317 20983diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
5e856224 20984index 642d880..44e0f3f 100644
20985--- a/arch/x86/lguest/boot.c
20986+++ b/arch/x86/lguest/boot.c
5e856224 20987@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20988 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20989 * Launcher to reboot us.
20990 */
20991-static void lguest_restart(char *reason)
20992+static __noreturn void lguest_restart(char *reason)
20993 {
20994 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20995+ BUG();
20996 }
20997
20998 /*G:050
20999diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21000index 042f682..c92afb6 100644
21001--- a/arch/x86/lib/atomic64_32.c
21002+++ b/arch/x86/lib/atomic64_32.c
66a7e928 21003@@ -8,18 +8,30 @@
21004
21005 long long atomic64_read_cx8(long long, const atomic64_t *v);
21006 EXPORT_SYMBOL(atomic64_read_cx8);
21007+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
21008+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
21009 long long atomic64_set_cx8(long long, const atomic64_t *v);
21010 EXPORT_SYMBOL(atomic64_set_cx8);
21011+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
21012+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
8308f9c9 21013 long long atomic64_xchg_cx8(long long, unsigned high);
21014 EXPORT_SYMBOL(atomic64_xchg_cx8);
21015 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
21016 EXPORT_SYMBOL(atomic64_add_return_cx8);
21017+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21018+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
21019 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
8308f9c9 21020 EXPORT_SYMBOL(atomic64_sub_return_cx8);
21021+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21022+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
21023 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
21024 EXPORT_SYMBOL(atomic64_inc_return_cx8);
21025+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21026+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
21027 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
21028 EXPORT_SYMBOL(atomic64_dec_return_cx8);
21029+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21030+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
8308f9c9 21031 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
21032 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
21033 int atomic64_inc_not_zero_cx8(atomic64_t *v);
21034@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
21035 #ifndef CONFIG_X86_CMPXCHG64
21036 long long atomic64_read_386(long long, const atomic64_t *v);
21037 EXPORT_SYMBOL(atomic64_read_386);
21038+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
21039+EXPORT_SYMBOL(atomic64_read_unchecked_386);
21040 long long atomic64_set_386(long long, const atomic64_t *v);
21041 EXPORT_SYMBOL(atomic64_set_386);
21042+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
21043+EXPORT_SYMBOL(atomic64_set_unchecked_386);
21044 long long atomic64_xchg_386(long long, unsigned high);
21045 EXPORT_SYMBOL(atomic64_xchg_386);
21046 long long atomic64_add_return_386(long long a, atomic64_t *v);
21047 EXPORT_SYMBOL(atomic64_add_return_386);
21048+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21049+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
21050 long long atomic64_sub_return_386(long long a, atomic64_t *v);
21051 EXPORT_SYMBOL(atomic64_sub_return_386);
21052+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21053+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
21054 long long atomic64_inc_return_386(long long a, atomic64_t *v);
21055 EXPORT_SYMBOL(atomic64_inc_return_386);
21056+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21057+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
21058 long long atomic64_dec_return_386(long long a, atomic64_t *v);
21059 EXPORT_SYMBOL(atomic64_dec_return_386);
21060+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21061+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
21062 long long atomic64_add_386(long long a, atomic64_t *v);
21063 EXPORT_SYMBOL(atomic64_add_386);
21064+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
21065+EXPORT_SYMBOL(atomic64_add_unchecked_386);
21066 long long atomic64_sub_386(long long a, atomic64_t *v);
21067 EXPORT_SYMBOL(atomic64_sub_386);
21068+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
21069+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
21070 long long atomic64_inc_386(long long a, atomic64_t *v);
21071 EXPORT_SYMBOL(atomic64_inc_386);
21072+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
21073+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
21074 long long atomic64_dec_386(long long a, atomic64_t *v);
21075 EXPORT_SYMBOL(atomic64_dec_386);
21076+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
21077+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
21078 long long atomic64_dec_if_positive_386(atomic64_t *v);
21079 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
21080 int atomic64_inc_not_zero_386(atomic64_t *v);
21081diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
21082index e8e7e0d..56fd1b0 100644
21083--- a/arch/x86/lib/atomic64_386_32.S
21084+++ b/arch/x86/lib/atomic64_386_32.S
21085@@ -48,6 +48,10 @@ BEGIN(read)
21086 movl (v), %eax
21087 movl 4(v), %edx
21088 RET_ENDP
21089+BEGIN(read_unchecked)
21090+ movl (v), %eax
21091+ movl 4(v), %edx
21092+RET_ENDP
21093 #undef v
21094
21095 #define v %esi
21096@@ -55,6 +59,10 @@ BEGIN(set)
21097 movl %ebx, (v)
21098 movl %ecx, 4(v)
21099 RET_ENDP
21100+BEGIN(set_unchecked)
21101+ movl %ebx, (v)
21102+ movl %ecx, 4(v)
21103+RET_ENDP
21104 #undef v
21105
21106 #define v %esi
21107@@ -70,6 +78,20 @@ RET_ENDP
21108 BEGIN(add)
21109 addl %eax, (v)
21110 adcl %edx, 4(v)
21111+
21112+#ifdef CONFIG_PAX_REFCOUNT
21113+ jno 0f
21114+ subl %eax, (v)
21115+ sbbl %edx, 4(v)
21116+ int $4
21117+0:
21118+ _ASM_EXTABLE(0b, 0b)
21119+#endif
21120+
21121+RET_ENDP
21122+BEGIN(add_unchecked)
21123+ addl %eax, (v)
21124+ adcl %edx, 4(v)
21125 RET_ENDP
21126 #undef v
21127
21128@@ -77,6 +99,24 @@ RET_ENDP
21129 BEGIN(add_return)
21130 addl (v), %eax
21131 adcl 4(v), %edx
21132+
21133+#ifdef CONFIG_PAX_REFCOUNT
21134+ into
21135+1234:
21136+ _ASM_EXTABLE(1234b, 2f)
21137+#endif
21138+
21139+ movl %eax, (v)
21140+ movl %edx, 4(v)
21141+
21142+#ifdef CONFIG_PAX_REFCOUNT
21143+2:
21144+#endif
21145+
21146+RET_ENDP
21147+BEGIN(add_return_unchecked)
21148+ addl (v), %eax
21149+ adcl 4(v), %edx
21150 movl %eax, (v)
21151 movl %edx, 4(v)
21152 RET_ENDP
21153@@ -86,6 +126,20 @@ RET_ENDP
21154 BEGIN(sub)
21155 subl %eax, (v)
21156 sbbl %edx, 4(v)
21157+
21158+#ifdef CONFIG_PAX_REFCOUNT
21159+ jno 0f
21160+ addl %eax, (v)
21161+ adcl %edx, 4(v)
21162+ int $4
21163+0:
21164+ _ASM_EXTABLE(0b, 0b)
21165+#endif
21166+
21167+RET_ENDP
21168+BEGIN(sub_unchecked)
21169+ subl %eax, (v)
21170+ sbbl %edx, 4(v)
21171 RET_ENDP
21172 #undef v
21173
21174@@ -96,6 +150,27 @@ BEGIN(sub_return)
21175 sbbl $0, %edx
21176 addl (v), %eax
21177 adcl 4(v), %edx
21178+
21179+#ifdef CONFIG_PAX_REFCOUNT
21180+ into
21181+1234:
21182+ _ASM_EXTABLE(1234b, 2f)
21183+#endif
21184+
21185+ movl %eax, (v)
21186+ movl %edx, 4(v)
21187+
21188+#ifdef CONFIG_PAX_REFCOUNT
21189+2:
21190+#endif
21191+
21192+RET_ENDP
21193+BEGIN(sub_return_unchecked)
21194+ negl %edx
21195+ negl %eax
21196+ sbbl $0, %edx
21197+ addl (v), %eax
21198+ adcl 4(v), %edx
21199 movl %eax, (v)
21200 movl %edx, 4(v)
21201 RET_ENDP
21202@@ -105,6 +180,20 @@ RET_ENDP
21203 BEGIN(inc)
21204 addl $1, (v)
21205 adcl $0, 4(v)
21206+
21207+#ifdef CONFIG_PAX_REFCOUNT
21208+ jno 0f
21209+ subl $1, (v)
21210+ sbbl $0, 4(v)
21211+ int $4
21212+0:
21213+ _ASM_EXTABLE(0b, 0b)
21214+#endif
21215+
21216+RET_ENDP
21217+BEGIN(inc_unchecked)
21218+ addl $1, (v)
21219+ adcl $0, 4(v)
21220 RET_ENDP
21221 #undef v
21222
21223@@ -114,6 +203,26 @@ BEGIN(inc_return)
21224 movl 4(v), %edx
21225 addl $1, %eax
21226 adcl $0, %edx
21227+
21228+#ifdef CONFIG_PAX_REFCOUNT
21229+ into
21230+1234:
21231+ _ASM_EXTABLE(1234b, 2f)
21232+#endif
21233+
21234+ movl %eax, (v)
21235+ movl %edx, 4(v)
21236+
21237+#ifdef CONFIG_PAX_REFCOUNT
21238+2:
21239+#endif
21240+
21241+RET_ENDP
21242+BEGIN(inc_return_unchecked)
21243+ movl (v), %eax
21244+ movl 4(v), %edx
21245+ addl $1, %eax
21246+ adcl $0, %edx
21247 movl %eax, (v)
21248 movl %edx, 4(v)
21249 RET_ENDP
21250@@ -123,6 +232,20 @@ RET_ENDP
21251 BEGIN(dec)
21252 subl $1, (v)
21253 sbbl $0, 4(v)
21254+
21255+#ifdef CONFIG_PAX_REFCOUNT
21256+ jno 0f
21257+ addl $1, (v)
21258+ adcl $0, 4(v)
21259+ int $4
21260+0:
21261+ _ASM_EXTABLE(0b, 0b)
21262+#endif
21263+
21264+RET_ENDP
21265+BEGIN(dec_unchecked)
21266+ subl $1, (v)
21267+ sbbl $0, 4(v)
21268 RET_ENDP
21269 #undef v
21270
21271@@ -132,6 +255,26 @@ BEGIN(dec_return)
21272 movl 4(v), %edx
21273 subl $1, %eax
21274 sbbl $0, %edx
21275+
21276+#ifdef CONFIG_PAX_REFCOUNT
21277+ into
21278+1234:
21279+ _ASM_EXTABLE(1234b, 2f)
21280+#endif
21281+
21282+ movl %eax, (v)
21283+ movl %edx, 4(v)
21284+
21285+#ifdef CONFIG_PAX_REFCOUNT
21286+2:
21287+#endif
21288+
21289+RET_ENDP
21290+BEGIN(dec_return_unchecked)
21291+ movl (v), %eax
21292+ movl 4(v), %edx
21293+ subl $1, %eax
21294+ sbbl $0, %edx
21295 movl %eax, (v)
21296 movl %edx, 4(v)
21297 RET_ENDP
21298@@ -143,6 +286,13 @@ BEGIN(add_unless)
21299 adcl %edx, %edi
21300 addl (v), %eax
21301 adcl 4(v), %edx
21302+
21303+#ifdef CONFIG_PAX_REFCOUNT
21304+ into
21305+1234:
21306+ _ASM_EXTABLE(1234b, 2f)
21307+#endif
21308+
21309 cmpl %eax, %esi
21310 je 3f
21311 1:
21312@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
21313 1:
21314 addl $1, %eax
21315 adcl $0, %edx
21316+
21317+#ifdef CONFIG_PAX_REFCOUNT
21318+ into
21319+1234:
21320+ _ASM_EXTABLE(1234b, 2f)
21321+#endif
21322+
21323 movl %eax, (v)
21324 movl %edx, 4(v)
21325 movl $1, %eax
21326@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
21327 movl 4(v), %edx
21328 subl $1, %eax
21329 sbbl $0, %edx
21330+
21331+#ifdef CONFIG_PAX_REFCOUNT
21332+ into
21333+1234:
21334+ _ASM_EXTABLE(1234b, 1f)
21335+#endif
21336+
21337 js 1f
21338 movl %eax, (v)
21339 movl %edx, 4(v)
fe2de317 21340diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
5e856224 21341index 391a083..3a2cf39 100644
21342--- a/arch/x86/lib/atomic64_cx8_32.S
21343+++ b/arch/x86/lib/atomic64_cx8_32.S
6e9df6a3 21344@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
21345 CFI_STARTPROC
21346
21347 read64 %ecx
6e9df6a3 21348+ pax_force_retaddr
15a11c5b 21349 ret
21350 CFI_ENDPROC
21351 ENDPROC(atomic64_read_cx8)
21352
21353+ENTRY(atomic64_read_unchecked_cx8)
21354+ CFI_STARTPROC
21355+
21356+ read64 %ecx
6e9df6a3 21357+ pax_force_retaddr
21358+ ret
21359+ CFI_ENDPROC
66a7e928 21360+ENDPROC(atomic64_read_unchecked_cx8)
21361+
21362 ENTRY(atomic64_set_cx8)
21363 CFI_STARTPROC
21364
6e9df6a3 21365@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
21366 cmpxchg8b (%esi)
21367 jne 1b
21368
6e9df6a3 21369+ pax_force_retaddr
15a11c5b 21370 ret
21371 CFI_ENDPROC
21372 ENDPROC(atomic64_set_cx8)
21373
21374+ENTRY(atomic64_set_unchecked_cx8)
21375+ CFI_STARTPROC
21376+
21377+1:
21378+/* we don't need LOCK_PREFIX since aligned 64-bit writes
21379+ * are atomic on 586 and newer */
21380+ cmpxchg8b (%esi)
21381+ jne 1b
21382+
6e9df6a3 21383+ pax_force_retaddr
21384+ ret
21385+ CFI_ENDPROC
21386+ENDPROC(atomic64_set_unchecked_cx8)
21387+
21388 ENTRY(atomic64_xchg_cx8)
21389 CFI_STARTPROC
21390
6e9df6a3 21391@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
21392 cmpxchg8b (%esi)
21393 jne 1b
21394
6e9df6a3 21395+ pax_force_retaddr
15a11c5b 21396 ret
21397 CFI_ENDPROC
21398 ENDPROC(atomic64_xchg_cx8)
21399
21400-.macro addsub_return func ins insc
21401-ENTRY(atomic64_\func\()_return_cx8)
21402+.macro addsub_return func ins insc unchecked=""
21403+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21404 CFI_STARTPROC
21405 SAVE ebp
21406 SAVE ebx
6e9df6a3 21407@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21408 movl %edx, %ecx
21409 \ins\()l %esi, %ebx
21410 \insc\()l %edi, %ecx
21411+
8308f9c9 21412+.ifb \unchecked
21413+#ifdef CONFIG_PAX_REFCOUNT
21414+ into
21415+2:
21416+ _ASM_EXTABLE(2b, 3f)
21417+#endif
8308f9c9 21418+.endif
21419+
21420 LOCK_PREFIX
21421 cmpxchg8b (%ebp)
21422 jne 1b
21423-
21424-10:
21425 movl %ebx, %eax
21426 movl %ecx, %edx
21427+
8308f9c9 21428+.ifb \unchecked
21429+#ifdef CONFIG_PAX_REFCOUNT
21430+3:
21431+#endif
8308f9c9 21432+.endif
21433+
21434 RESTORE edi
21435 RESTORE esi
21436 RESTORE ebx
66a7e928 21437 RESTORE ebp
6e9df6a3 21438+ pax_force_retaddr
21439 ret
21440 CFI_ENDPROC
21441-ENDPROC(atomic64_\func\()_return_cx8)
21442+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21443 .endm
21444
21445 addsub_return add add adc
21446 addsub_return sub sub sbb
21447+addsub_return add add adc _unchecked
66a7e928 21448+addsub_return sub sub sbb _unchecked
21449
21450-.macro incdec_return func ins insc
21451-ENTRY(atomic64_\func\()_return_cx8)
5e856224 21452+.macro incdec_return func ins insc unchecked=""
21453+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21454 CFI_STARTPROC
21455 SAVE ebx
21456
6e9df6a3 21457@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21458 movl %edx, %ecx
21459 \ins\()l $1, %ebx
21460 \insc\()l $0, %ecx
21461+
8308f9c9 21462+.ifb \unchecked
21463+#ifdef CONFIG_PAX_REFCOUNT
21464+ into
21465+2:
21466+ _ASM_EXTABLE(2b, 3f)
21467+#endif
8308f9c9 21468+.endif
21469+
21470 LOCK_PREFIX
21471 cmpxchg8b (%esi)
21472 jne 1b
21473
21474-10:
21475 movl %ebx, %eax
21476 movl %ecx, %edx
21477+
8308f9c9 21478+.ifb \unchecked
21479+#ifdef CONFIG_PAX_REFCOUNT
21480+3:
21481+#endif
8308f9c9 21482+.endif
21483+
21484 RESTORE ebx
6e9df6a3 21485+ pax_force_retaddr
21486 ret
21487 CFI_ENDPROC
21488-ENDPROC(atomic64_\func\()_return_cx8)
21489+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21490 .endm
21491
21492 incdec_return inc add adc
21493 incdec_return dec sub sbb
21494+incdec_return inc add adc _unchecked
66a7e928 21495+incdec_return dec sub sbb _unchecked
21496
21497 ENTRY(atomic64_dec_if_positive_cx8)
21498 CFI_STARTPROC
6e9df6a3 21499@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21500 movl %edx, %ecx
21501 subl $1, %ebx
21502 sbb $0, %ecx
21503+
21504+#ifdef CONFIG_PAX_REFCOUNT
21505+ into
21506+1234:
21507+ _ASM_EXTABLE(1234b, 2f)
21508+#endif
21509+
21510 js 2f
21511 LOCK_PREFIX
21512 cmpxchg8b (%esi)
6e9df6a3 21513@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21514 movl %ebx, %eax
21515 movl %ecx, %edx
21516 RESTORE ebx
6e9df6a3 21517+ pax_force_retaddr
21518 ret
21519 CFI_ENDPROC
21520 ENDPROC(atomic64_dec_if_positive_cx8)
6e9df6a3 21521@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
21522 movl %edx, %ecx
21523 addl %esi, %ebx
21524 adcl %edi, %ecx
21525+
21526+#ifdef CONFIG_PAX_REFCOUNT
21527+ into
21528+1234:
66a7e928 21529+ _ASM_EXTABLE(1234b, 3f)
21530+#endif
21531+
21532 LOCK_PREFIX
21533 cmpxchg8b (%ebp)
21534 jne 1b
6e9df6a3 21535@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
21536 CFI_ADJUST_CFA_OFFSET -8
21537 RESTORE ebx
21538 RESTORE ebp
6e9df6a3 21539+ pax_force_retaddr
21540 ret
21541 4:
21542 cmpl %edx, 4(%esp)
6e9df6a3 21543@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21544 movl %edx, %ecx
21545 addl $1, %ebx
21546 adcl $0, %ecx
21547+
21548+#ifdef CONFIG_PAX_REFCOUNT
21549+ into
21550+1234:
66a7e928 21551+ _ASM_EXTABLE(1234b, 3f)
21552+#endif
21553+
21554 LOCK_PREFIX
21555 cmpxchg8b (%esi)
21556 jne 1b
6e9df6a3 21557@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21558 movl $1, %eax
21559 3:
21560 RESTORE ebx
6e9df6a3 21561+ pax_force_retaddr
21562 ret
21563 4:
21564 testl %edx, %edx
21565diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21566index 78d16a5..fbcf666 100644
21567--- a/arch/x86/lib/checksum_32.S
21568+++ b/arch/x86/lib/checksum_32.S
21569@@ -28,7 +28,8 @@
21570 #include <linux/linkage.h>
21571 #include <asm/dwarf2.h>
21572 #include <asm/errno.h>
21573-
21574+#include <asm/segment.h>
21575+
21576 /*
21577 * computes a partial checksum, e.g. for TCP/UDP fragments
21578 */
fe2de317 21579@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21580
21581 #define ARGBASE 16
21582 #define FP 12
21583-
21584-ENTRY(csum_partial_copy_generic)
21585+
21586+ENTRY(csum_partial_copy_generic_to_user)
21587 CFI_STARTPROC
21588+
21589+#ifdef CONFIG_PAX_MEMORY_UDEREF
21590+ pushl_cfi %gs
21591+ popl_cfi %es
58c5fc13 21592+ jmp csum_partial_copy_generic
bc901d79 21593+#endif
21594+
21595+ENTRY(csum_partial_copy_generic_from_user)
21596+
21597+#ifdef CONFIG_PAX_MEMORY_UDEREF
21598+ pushl_cfi %gs
21599+ popl_cfi %ds
bc901d79 21600+#endif
21601+
21602+ENTRY(csum_partial_copy_generic)
21603 subl $4,%esp
21604 CFI_ADJUST_CFA_OFFSET 4
21605 pushl_cfi %edi
21606@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21607 jmp 4f
21608 SRC(1: movw (%esi), %bx )
21609 addl $2, %esi
21610-DST( movw %bx, (%edi) )
21611+DST( movw %bx, %es:(%edi) )
21612 addl $2, %edi
21613 addw %bx, %ax
21614 adcl $0, %eax
66a7e928 21615@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21616 SRC(1: movl (%esi), %ebx )
21617 SRC( movl 4(%esi), %edx )
21618 adcl %ebx, %eax
21619-DST( movl %ebx, (%edi) )
21620+DST( movl %ebx, %es:(%edi) )
21621 adcl %edx, %eax
21622-DST( movl %edx, 4(%edi) )
21623+DST( movl %edx, %es:4(%edi) )
21624
21625 SRC( movl 8(%esi), %ebx )
21626 SRC( movl 12(%esi), %edx )
21627 adcl %ebx, %eax
21628-DST( movl %ebx, 8(%edi) )
21629+DST( movl %ebx, %es:8(%edi) )
21630 adcl %edx, %eax
21631-DST( movl %edx, 12(%edi) )
21632+DST( movl %edx, %es:12(%edi) )
21633
21634 SRC( movl 16(%esi), %ebx )
21635 SRC( movl 20(%esi), %edx )
21636 adcl %ebx, %eax
21637-DST( movl %ebx, 16(%edi) )
21638+DST( movl %ebx, %es:16(%edi) )
21639 adcl %edx, %eax
21640-DST( movl %edx, 20(%edi) )
21641+DST( movl %edx, %es:20(%edi) )
21642
21643 SRC( movl 24(%esi), %ebx )
21644 SRC( movl 28(%esi), %edx )
21645 adcl %ebx, %eax
21646-DST( movl %ebx, 24(%edi) )
21647+DST( movl %ebx, %es:24(%edi) )
21648 adcl %edx, %eax
21649-DST( movl %edx, 28(%edi) )
21650+DST( movl %edx, %es:28(%edi) )
21651
21652 lea 32(%esi), %esi
21653 lea 32(%edi), %edi
66a7e928 21654@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21655 shrl $2, %edx # This clears CF
21656 SRC(3: movl (%esi), %ebx )
21657 adcl %ebx, %eax
21658-DST( movl %ebx, (%edi) )
21659+DST( movl %ebx, %es:(%edi) )
21660 lea 4(%esi), %esi
21661 lea 4(%edi), %edi
21662 dec %edx
66a7e928 21663@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21664 jb 5f
21665 SRC( movw (%esi), %cx )
21666 leal 2(%esi), %esi
21667-DST( movw %cx, (%edi) )
21668+DST( movw %cx, %es:(%edi) )
21669 leal 2(%edi), %edi
21670 je 6f
21671 shll $16,%ecx
21672 SRC(5: movb (%esi), %cl )
21673-DST( movb %cl, (%edi) )
21674+DST( movb %cl, %es:(%edi) )
21675 6: addl %ecx, %eax
21676 adcl $0, %eax
21677 7:
66a7e928 21678@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21679
21680 6001:
21681 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21682- movl $-EFAULT, (%ebx)
21683+ movl $-EFAULT, %ss:(%ebx)
21684
21685 # zero the complete destination - computing the rest
21686 # is too much work
66a7e928 21687@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21688
21689 6002:
21690 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21691- movl $-EFAULT,(%ebx)
21692+ movl $-EFAULT,%ss:(%ebx)
21693 jmp 5000b
21694
21695 .previous
21696
21697+ pushl_cfi %ss
21698+ popl_cfi %ds
21699+ pushl_cfi %ss
21700+ popl_cfi %es
21701 popl_cfi %ebx
58c5fc13 21702 CFI_RESTORE ebx
21703 popl_cfi %esi
21704@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21705 popl_cfi %ecx # equivalent to addl $4,%esp
21706 ret
21707 CFI_ENDPROC
21708-ENDPROC(csum_partial_copy_generic)
21709+ENDPROC(csum_partial_copy_generic_to_user)
21710
21711 #else
21712
21713 /* Version for PentiumII/PPro */
21714
21715 #define ROUND1(x) \
21716+ nop; nop; nop; \
21717 SRC(movl x(%esi), %ebx ) ; \
21718 addl %ebx, %eax ; \
21719- DST(movl %ebx, x(%edi) ) ;
21720+ DST(movl %ebx, %es:x(%edi)) ;
21721
21722 #define ROUND(x) \
21723+ nop; nop; nop; \
21724 SRC(movl x(%esi), %ebx ) ; \
21725 adcl %ebx, %eax ; \
21726- DST(movl %ebx, x(%edi) ) ;
21727+ DST(movl %ebx, %es:x(%edi)) ;
21728
21729 #define ARGBASE 12
21730-
21731-ENTRY(csum_partial_copy_generic)
21732+
21733+ENTRY(csum_partial_copy_generic_to_user)
21734 CFI_STARTPROC
21735+
21736+#ifdef CONFIG_PAX_MEMORY_UDEREF
21737+ pushl_cfi %gs
21738+ popl_cfi %es
58c5fc13 21739+ jmp csum_partial_copy_generic
bc901d79 21740+#endif
21741+
21742+ENTRY(csum_partial_copy_generic_from_user)
21743+
21744+#ifdef CONFIG_PAX_MEMORY_UDEREF
21745+ pushl_cfi %gs
21746+ popl_cfi %ds
bc901d79 21747+#endif
21748+
21749+ENTRY(csum_partial_copy_generic)
66a7e928 21750 pushl_cfi %ebx
58c5fc13 21751 CFI_REL_OFFSET ebx, 0
21752 pushl_cfi %edi
21753@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21754 subl %ebx, %edi
21755 lea -1(%esi),%edx
21756 andl $-32,%edx
21757- lea 3f(%ebx,%ebx), %ebx
21758+ lea 3f(%ebx,%ebx,2), %ebx
21759 testl %esi, %esi
21760 jmp *%ebx
21761 1: addl $64,%esi
66a7e928 21762@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21763 jb 5f
21764 SRC( movw (%esi), %dx )
21765 leal 2(%esi), %esi
21766-DST( movw %dx, (%edi) )
21767+DST( movw %dx, %es:(%edi) )
21768 leal 2(%edi), %edi
21769 je 6f
21770 shll $16,%edx
21771 5:
21772 SRC( movb (%esi), %dl )
21773-DST( movb %dl, (%edi) )
21774+DST( movb %dl, %es:(%edi) )
21775 6: addl %edx, %eax
21776 adcl $0, %eax
21777 7:
21778 .section .fixup, "ax"
21779 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21780- movl $-EFAULT, (%ebx)
21781+ movl $-EFAULT, %ss:(%ebx)
21782 # zero the complete destination (computing the rest is too much work)
21783 movl ARGBASE+8(%esp),%edi # dst
21784 movl ARGBASE+12(%esp),%ecx # len
66a7e928 21785@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21786 rep; stosb
21787 jmp 7b
21788 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21789- movl $-EFAULT, (%ebx)
21790+ movl $-EFAULT, %ss:(%ebx)
21791 jmp 7b
21792 .previous
21793
bc901d79 21794+#ifdef CONFIG_PAX_MEMORY_UDEREF
21795+ pushl_cfi %ss
21796+ popl_cfi %ds
21797+ pushl_cfi %ss
21798+ popl_cfi %es
21799+#endif
21800+
66a7e928 21801 popl_cfi %esi
58c5fc13 21802 CFI_RESTORE esi
21803 popl_cfi %edi
21804@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21805 CFI_RESTORE ebx
21806 ret
21807 CFI_ENDPROC
21808-ENDPROC(csum_partial_copy_generic)
21809+ENDPROC(csum_partial_copy_generic_to_user)
21810
21811 #undef ROUND
21812 #undef ROUND1
21813diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21814index f2145cf..cea889d 100644
21815--- a/arch/x86/lib/clear_page_64.S
21816+++ b/arch/x86/lib/clear_page_64.S
6e9df6a3 21817@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21818 movl $4096/8,%ecx
21819 xorl %eax,%eax
21820 rep stosq
6e9df6a3 21821+ pax_force_retaddr
21822 ret
21823 CFI_ENDPROC
21824 ENDPROC(clear_page_c)
6e9df6a3 21825@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21826 movl $4096,%ecx
21827 xorl %eax,%eax
21828 rep stosb
6e9df6a3 21829+ pax_force_retaddr
21830 ret
21831 CFI_ENDPROC
21832 ENDPROC(clear_page_c_e)
6e9df6a3 21833@@ -43,6 +45,7 @@ ENTRY(clear_page)
21834 leaq 64(%rdi),%rdi
21835 jnz .Lloop
21836 nop
6e9df6a3 21837+ pax_force_retaddr
21838 ret
21839 CFI_ENDPROC
21840 .Lclear_page_end:
6e9df6a3 21841@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21842
21843 #include <asm/cpufeature.h>
21844
21845- .section .altinstr_replacement,"ax"
21846+ .section .altinstr_replacement,"a"
21847 1: .byte 0xeb /* jmp <disp8> */
21848 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
15a11c5b 21849 2: .byte 0xeb /* jmp <disp8> */
21850diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21851index 1e572c5..2a162cd 100644
21852--- a/arch/x86/lib/cmpxchg16b_emu.S
21853+++ b/arch/x86/lib/cmpxchg16b_emu.S
21854@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21855
21856 popf
21857 mov $1, %al
21858+ pax_force_retaddr
21859 ret
21860
21861 not_same:
21862 popf
21863 xor %al,%al
21864+ pax_force_retaddr
21865 ret
21866
21867 CFI_ENDPROC
21868diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21869index 01c805b..dccb07f 100644
21870--- a/arch/x86/lib/copy_page_64.S
21871+++ b/arch/x86/lib/copy_page_64.S
6e9df6a3 21872@@ -9,6 +9,7 @@ copy_page_c:
21873 CFI_STARTPROC
21874 movl $4096/8,%ecx
21875 rep movsq
6e9df6a3 21876+ pax_force_retaddr
21877 ret
21878 CFI_ENDPROC
21879 ENDPROC(copy_page_c)
21880@@ -39,7 +40,7 @@ ENTRY(copy_page)
21881 movq 16 (%rsi), %rdx
21882 movq 24 (%rsi), %r8
21883 movq 32 (%rsi), %r9
21884- movq 40 (%rsi), %r10
21885+ movq 40 (%rsi), %r13
21886 movq 48 (%rsi), %r11
21887 movq 56 (%rsi), %r12
21888
21889@@ -50,7 +51,7 @@ ENTRY(copy_page)
21890 movq %rdx, 16 (%rdi)
21891 movq %r8, 24 (%rdi)
21892 movq %r9, 32 (%rdi)
21893- movq %r10, 40 (%rdi)
21894+ movq %r13, 40 (%rdi)
21895 movq %r11, 48 (%rdi)
21896 movq %r12, 56 (%rdi)
21897
21898@@ -69,7 +70,7 @@ ENTRY(copy_page)
21899 movq 16 (%rsi), %rdx
21900 movq 24 (%rsi), %r8
21901 movq 32 (%rsi), %r9
21902- movq 40 (%rsi), %r10
21903+ movq 40 (%rsi), %r13
21904 movq 48 (%rsi), %r11
21905 movq 56 (%rsi), %r12
21906
21907@@ -78,7 +79,7 @@ ENTRY(copy_page)
21908 movq %rdx, 16 (%rdi)
21909 movq %r8, 24 (%rdi)
21910 movq %r9, 32 (%rdi)
21911- movq %r10, 40 (%rdi)
21912+ movq %r13, 40 (%rdi)
21913 movq %r11, 48 (%rdi)
21914 movq %r12, 56 (%rdi)
21915
6e9df6a3 21916@@ -95,6 +96,7 @@ ENTRY(copy_page)
21917 CFI_RESTORE r13
21918 addq $3*8,%rsp
21919 CFI_ADJUST_CFA_OFFSET -3*8
6e9df6a3 21920+ pax_force_retaddr
21921 ret
21922 .Lcopy_page_end:
21923 CFI_ENDPROC
6e9df6a3 21924@@ -105,7 +107,7 @@ ENDPROC(copy_page)
21925
21926 #include <asm/cpufeature.h>
21927
21928- .section .altinstr_replacement,"ax"
21929+ .section .altinstr_replacement,"a"
21930 1: .byte 0xeb /* jmp <disp8> */
21931 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21932 2:
21933diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21934index 0248402..821c786 100644
21935--- a/arch/x86/lib/copy_user_64.S
21936+++ b/arch/x86/lib/copy_user_64.S
15a11c5b 21937@@ -16,6 +16,7 @@
21938 #include <asm/thread_info.h>
21939 #include <asm/cpufeature.h>
15a11c5b 21940 #include <asm/alternative-asm.h>
21941+#include <asm/pgtable.h>
21942
21943 /*
21944 * By placing feature2 after feature1 in altinstructions section, we logically
21945@@ -29,7 +30,7 @@
21946 .byte 0xe9 /* 32bit jump */
21947 .long \orig-1f /* by default jump to orig */
21948 1:
21949- .section .altinstr_replacement,"ax"
21950+ .section .altinstr_replacement,"a"
21951 2: .byte 0xe9 /* near jump with 32bit immediate */
21952 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21953 3: .byte 0xe9 /* near jump with 32bit immediate */
6e9df6a3 21954@@ -71,47 +72,20 @@
58c5fc13
MT
21955 #endif
21956 .endm
21957
21958-/* Standard copy_to_user with segment limit checking */
ae4e228f 21959-ENTRY(_copy_to_user)
21960- CFI_STARTPROC
21961- GET_THREAD_INFO(%rax)
21962- movq %rdi,%rcx
21963- addq %rdx,%rcx
21964- jc bad_to_user
21965- cmpq TI_addr_limit(%rax),%rcx
21966- ja bad_to_user
21967- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21968- copy_user_generic_unrolled,copy_user_generic_string, \
21969- copy_user_enhanced_fast_string
58c5fc13 21970- CFI_ENDPROC
ae4e228f 21971-ENDPROC(_copy_to_user)
21972-
21973-/* Standard copy_from_user with segment limit checking */
ae4e228f 21974-ENTRY(_copy_from_user)
21975- CFI_STARTPROC
21976- GET_THREAD_INFO(%rax)
21977- movq %rsi,%rcx
21978- addq %rdx,%rcx
21979- jc bad_from_user
21980- cmpq TI_addr_limit(%rax),%rcx
15a11c5b
MT
21981- ja bad_from_user
21982- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21983- copy_user_generic_unrolled,copy_user_generic_string, \
21984- copy_user_enhanced_fast_string
58c5fc13 21985- CFI_ENDPROC
ae4e228f 21986-ENDPROC(_copy_from_user)
58c5fc13 21987-
21988 .section .fixup,"ax"
21989 /* must zero dest */
21990 ENTRY(bad_from_user)
21991 bad_from_user:
21992 CFI_STARTPROC
21993+ testl %edx,%edx
21994+ js bad_to_user
21995 movl %edx,%ecx
21996 xorl %eax,%eax
21997 rep
15a11c5b
MT
21998 stosb
21999 bad_to_user:
22000 movl %edx,%eax
6e9df6a3 22001+ pax_force_retaddr
22002 ret
22003 CFI_ENDPROC
22004 ENDPROC(bad_from_user)
22005@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22006 jz 17f
22007 1: movq (%rsi),%r8
22008 2: movq 1*8(%rsi),%r9
22009-3: movq 2*8(%rsi),%r10
22010+3: movq 2*8(%rsi),%rax
22011 4: movq 3*8(%rsi),%r11
22012 5: movq %r8,(%rdi)
22013 6: movq %r9,1*8(%rdi)
22014-7: movq %r10,2*8(%rdi)
22015+7: movq %rax,2*8(%rdi)
22016 8: movq %r11,3*8(%rdi)
22017 9: movq 4*8(%rsi),%r8
22018 10: movq 5*8(%rsi),%r9
22019-11: movq 6*8(%rsi),%r10
22020+11: movq 6*8(%rsi),%rax
22021 12: movq 7*8(%rsi),%r11
22022 13: movq %r8,4*8(%rdi)
22023 14: movq %r9,5*8(%rdi)
22024-15: movq %r10,6*8(%rdi)
22025+15: movq %rax,6*8(%rdi)
22026 16: movq %r11,7*8(%rdi)
22027 leaq 64(%rsi),%rsi
22028 leaq 64(%rdi),%rdi
6e9df6a3 22029@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22030 decl %ecx
22031 jnz 21b
22032 23: xor %eax,%eax
6e9df6a3 22033+ pax_force_retaddr
22034 ret
22035
22036 .section .fixup,"ax"
6e9df6a3 22037@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
22038 3: rep
22039 movsb
22040 4: xorl %eax,%eax
6e9df6a3 22041+ pax_force_retaddr
22042 ret
22043
22044 .section .fixup,"ax"
6e9df6a3 22045@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
22046 1: rep
22047 movsb
22048 2: xorl %eax,%eax
6e9df6a3 22049+ pax_force_retaddr
22050 ret
22051
22052 .section .fixup,"ax"
22053diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22054index cb0c112..e3a6895 100644
22055--- a/arch/x86/lib/copy_user_nocache_64.S
22056+++ b/arch/x86/lib/copy_user_nocache_64.S
22057@@ -8,12 +8,14 @@
22058
22059 #include <linux/linkage.h>
22060 #include <asm/dwarf2.h>
22061+#include <asm/alternative-asm.h>
22062
22063 #define FIX_ALIGNMENT 1
22064
22065 #include <asm/current.h>
22066 #include <asm/asm-offsets.h>
22067 #include <asm/thread_info.h>
22068+#include <asm/pgtable.h>
22069
22070 .macro ALIGN_DESTINATION
22071 #ifdef FIX_ALIGNMENT
6e9df6a3 22072@@ -50,6 +52,15 @@
22073 */
22074 ENTRY(__copy_user_nocache)
22075 CFI_STARTPROC
22076+
22077+#ifdef CONFIG_PAX_MEMORY_UDEREF
22078+ mov $PAX_USER_SHADOW_BASE,%rcx
22079+ cmp %rcx,%rsi
22080+ jae 1f
22081+ add %rcx,%rsi
22082+1:
22083+#endif
22084+
22085 cmpl $8,%edx
22086 jb 20f /* less then 8 bytes, go to byte copy loop */
22087 ALIGN_DESTINATION
22088@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22089 jz 17f
22090 1: movq (%rsi),%r8
22091 2: movq 1*8(%rsi),%r9
22092-3: movq 2*8(%rsi),%r10
22093+3: movq 2*8(%rsi),%rax
22094 4: movq 3*8(%rsi),%r11
22095 5: movnti %r8,(%rdi)
22096 6: movnti %r9,1*8(%rdi)
22097-7: movnti %r10,2*8(%rdi)
22098+7: movnti %rax,2*8(%rdi)
22099 8: movnti %r11,3*8(%rdi)
22100 9: movq 4*8(%rsi),%r8
22101 10: movq 5*8(%rsi),%r9
22102-11: movq 6*8(%rsi),%r10
22103+11: movq 6*8(%rsi),%rax
22104 12: movq 7*8(%rsi),%r11
22105 13: movnti %r8,4*8(%rdi)
22106 14: movnti %r9,5*8(%rdi)
22107-15: movnti %r10,6*8(%rdi)
22108+15: movnti %rax,6*8(%rdi)
22109 16: movnti %r11,7*8(%rdi)
22110 leaq 64(%rsi),%rsi
22111 leaq 64(%rdi),%rdi
6e9df6a3 22112@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22113 jnz 21b
22114 23: xorl %eax,%eax
22115 sfence
6e9df6a3 22116+ pax_force_retaddr
22117 ret
22118
22119 .section .fixup,"ax"
22120diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22121index fb903b7..c92b7f7 100644
22122--- a/arch/x86/lib/csum-copy_64.S
22123+++ b/arch/x86/lib/csum-copy_64.S
22124@@ -8,6 +8,7 @@
22125 #include <linux/linkage.h>
22126 #include <asm/dwarf2.h>
22127 #include <asm/errno.h>
22128+#include <asm/alternative-asm.h>
22129
22130 /*
22131 * Checksum copy with exception handling.
22132@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22133 CFI_RESTORE rbp
22134 addq $7*8, %rsp
22135 CFI_ADJUST_CFA_OFFSET -7*8
fe2de317 22136+ pax_force_retaddr 0, 1
22137 ret
22138 CFI_RESTORE_STATE
22139
22140diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22141index 459b58a..9570bc7 100644
22142--- a/arch/x86/lib/csum-wrappers_64.c
22143+++ b/arch/x86/lib/csum-wrappers_64.c
22144@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22145 len -= 2;
22146 }
22147 }
6e9df6a3 22148- isum = csum_partial_copy_generic((__force const void *)src,
22149+
22150+#ifdef CONFIG_PAX_MEMORY_UDEREF
22151+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22152+ src += PAX_USER_SHADOW_BASE;
22153+#endif
22154+
6e9df6a3 22155+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22156 dst, len, isum, errp, NULL);
22157 if (unlikely(*errp))
6e9df6a3 22158 goto out_err;
fe2de317 22159@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22160 }
22161
22162 *errp = 0;
6e9df6a3 22163- return csum_partial_copy_generic(src, (void __force *)dst,
22164+
22165+#ifdef CONFIG_PAX_MEMORY_UDEREF
22166+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22167+ dst += PAX_USER_SHADOW_BASE;
22168+#endif
22169+
6e9df6a3 22170+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22171 len, isum, NULL, errp);
22172 }
6e9df6a3 22173 EXPORT_SYMBOL(csum_partial_copy_to_user);
22174diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22175index 51f1504..ddac4c1 100644
22176--- a/arch/x86/lib/getuser.S
22177+++ b/arch/x86/lib/getuser.S
6e9df6a3 22178@@ -33,15 +33,38 @@
22179 #include <asm/asm-offsets.h>
22180 #include <asm/thread_info.h>
22181 #include <asm/asm.h>
22182+#include <asm/segment.h>
df50ba0c 22183+#include <asm/pgtable.h>
6e9df6a3 22184+#include <asm/alternative-asm.h>
22185+
22186+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16454cff 22187+#define __copyuser_seg gs;
22188+#else
22189+#define __copyuser_seg
22190+#endif
22191
22192 .text
22193 ENTRY(__get_user_1)
ae4e228f 22194 CFI_STARTPROC
58c5fc13 22195+
bc901d79 22196+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22197 GET_THREAD_INFO(%_ASM_DX)
22198 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22199 jae bad_get_user
bc901d79 22200-1: movzb (%_ASM_AX),%edx
22201+
22202+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22203+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22204+ cmp %_ASM_DX,%_ASM_AX
22205+ jae 1234f
22206+ add %_ASM_DX,%_ASM_AX
22207+1234:
22208+#endif
22209+
22210+#endif
22211+
16454cff 22212+1: __copyuser_seg movzb (%_ASM_AX),%edx
58c5fc13 22213 xor %eax,%eax
6e9df6a3 22214+ pax_force_retaddr
22215 ret
22216 CFI_ENDPROC
22217 ENDPROC(__get_user_1)
22218@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22219 ENTRY(__get_user_2)
22220 CFI_STARTPROC
22221 add $1,%_ASM_AX
58c5fc13 22222+
bc901d79 22223+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22224 jc bad_get_user
22225 GET_THREAD_INFO(%_ASM_DX)
22226 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22227 jae bad_get_user
bc901d79 22228-2: movzwl -1(%_ASM_AX),%edx
22229+
22230+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22231+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22232+ cmp %_ASM_DX,%_ASM_AX
22233+ jae 1234f
22234+ add %_ASM_DX,%_ASM_AX
22235+1234:
22236+#endif
22237+
22238+#endif
22239+
16454cff 22240+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
58c5fc13 22241 xor %eax,%eax
6e9df6a3 22242+ pax_force_retaddr
22243 ret
22244 CFI_ENDPROC
22245 ENDPROC(__get_user_2)
22246@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22247 ENTRY(__get_user_4)
22248 CFI_STARTPROC
22249 add $3,%_ASM_AX
58c5fc13 22250+
bc901d79 22251+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22252 jc bad_get_user
22253 GET_THREAD_INFO(%_ASM_DX)
22254 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22255 jae bad_get_user
bc901d79 22256-3: mov -3(%_ASM_AX),%edx
22257+
22258+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22259+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22260+ cmp %_ASM_DX,%_ASM_AX
22261+ jae 1234f
22262+ add %_ASM_DX,%_ASM_AX
22263+1234:
22264+#endif
22265+
22266+#endif
22267+
16454cff 22268+3: __copyuser_seg mov -3(%_ASM_AX),%edx
58c5fc13 22269 xor %eax,%eax
6e9df6a3 22270+ pax_force_retaddr
22271 ret
22272 CFI_ENDPROC
22273 ENDPROC(__get_user_4)
22274@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22275 GET_THREAD_INFO(%_ASM_DX)
22276 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22277 jae bad_get_user
22278+
22279+#ifdef CONFIG_PAX_MEMORY_UDEREF
22280+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22281+ cmp %_ASM_DX,%_ASM_AX
22282+ jae 1234f
22283+ add %_ASM_DX,%_ASM_AX
22284+1234:
22285+#endif
22286+
22287 4: movq -7(%_ASM_AX),%_ASM_DX
22288 xor %eax,%eax
22289+ pax_force_retaddr
22290 ret
22291 CFI_ENDPROC
22292 ENDPROC(__get_user_8)
22293@@ -91,6 +152,7 @@ bad_get_user:
22294 CFI_STARTPROC
22295 xor %edx,%edx
22296 mov $(-EFAULT),%_ASM_AX
22297+ pax_force_retaddr
df50ba0c 22298 ret
22299 CFI_ENDPROC
22300 END(bad_get_user)
fe2de317 22301diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
5e856224 22302index 5a1f9f3..ba9f577 100644
22303--- a/arch/x86/lib/insn.c
22304+++ b/arch/x86/lib/insn.c
c52201e0 22305@@ -21,6 +21,11 @@
22306 #include <linux/string.h>
22307 #include <asm/inat.h>
22308 #include <asm/insn.h>
c52201e0 22309+#ifdef __KERNEL__
57199397 22310+#include <asm/pgtable_types.h>
22311+#else
22312+#define ktla_ktva(addr) addr
22313+#endif
57199397 22314
22315 /* Verify next sizeof(t) bytes can be on the same instruction */
22316 #define validate_next(t, insn, n) \
22317@@ -49,8 +54,8 @@
22318 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
22319 {
22320 memset(insn, 0, sizeof(*insn));
22321- insn->kaddr = kaddr;
22322- insn->next_byte = kaddr;
22323+ insn->kaddr = ktla_ktva(kaddr);
22324+ insn->next_byte = ktla_ktva(kaddr);
22325 insn->x86_64 = x86_64 ? 1 : 0;
22326 insn->opnd_bytes = 4;
22327 if (x86_64)
22328diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22329index 05a95e7..326f2fa 100644
22330--- a/arch/x86/lib/iomap_copy_64.S
22331+++ b/arch/x86/lib/iomap_copy_64.S
22332@@ -17,6 +17,7 @@
22333
22334 #include <linux/linkage.h>
22335 #include <asm/dwarf2.h>
22336+#include <asm/alternative-asm.h>
22337
22338 /*
22339 * override generic version in lib/iomap_copy.c
22340@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22341 CFI_STARTPROC
22342 movl %edx,%ecx
22343 rep movsd
6e9df6a3 22344+ pax_force_retaddr
22345 ret
22346 CFI_ENDPROC
22347 ENDPROC(__iowrite32_copy)
fe2de317
MT
22348diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22349index efbf2a0..8893637 100644
22350--- a/arch/x86/lib/memcpy_64.S
22351+++ b/arch/x86/lib/memcpy_64.S
6e9df6a3 22352@@ -34,6 +34,7 @@
22353 rep movsq
22354 movl %edx, %ecx
22355 rep movsb
6e9df6a3 22356+ pax_force_retaddr
22357 ret
22358 .Lmemcpy_e:
22359 .previous
6e9df6a3 22360@@ -51,6 +52,7 @@
22361
22362 movl %edx, %ecx
22363 rep movsb
6e9df6a3 22364+ pax_force_retaddr
22365 ret
22366 .Lmemcpy_e_e:
22367 .previous
fe2de317
MT
22368@@ -81,13 +83,13 @@ ENTRY(memcpy)
22369 */
22370 movq 0*8(%rsi), %r8
22371 movq 1*8(%rsi), %r9
22372- movq 2*8(%rsi), %r10
22373+ movq 2*8(%rsi), %rcx
22374 movq 3*8(%rsi), %r11
22375 leaq 4*8(%rsi), %rsi
22376
22377 movq %r8, 0*8(%rdi)
15a11c5b 22378 movq %r9, 1*8(%rdi)
22379- movq %r10, 2*8(%rdi)
22380+ movq %rcx, 2*8(%rdi)
22381 movq %r11, 3*8(%rdi)
22382 leaq 4*8(%rdi), %rdi
22383 jae .Lcopy_forward_loop
22384@@ -110,12 +112,12 @@ ENTRY(memcpy)
22385 subq $0x20, %rdx
22386 movq -1*8(%rsi), %r8
22387 movq -2*8(%rsi), %r9
22388- movq -3*8(%rsi), %r10
22389+ movq -3*8(%rsi), %rcx
22390 movq -4*8(%rsi), %r11
22391 leaq -4*8(%rsi), %rsi
22392 movq %r8, -1*8(%rdi)
22393 movq %r9, -2*8(%rdi)
22394- movq %r10, -3*8(%rdi)
22395+ movq %rcx, -3*8(%rdi)
22396 movq %r11, -4*8(%rdi)
22397 leaq -4*8(%rdi), %rdi
22398 jae .Lcopy_backward_loop
22399@@ -135,12 +137,13 @@ ENTRY(memcpy)
22400 */
22401 movq 0*8(%rsi), %r8
22402 movq 1*8(%rsi), %r9
22403- movq -2*8(%rsi, %rdx), %r10
22404+ movq -2*8(%rsi, %rdx), %rcx
22405 movq -1*8(%rsi, %rdx), %r11
22406 movq %r8, 0*8(%rdi)
22407 movq %r9, 1*8(%rdi)
22408- movq %r10, -2*8(%rdi, %rdx)
22409+ movq %rcx, -2*8(%rdi, %rdx)
15a11c5b 22410 movq %r11, -1*8(%rdi, %rdx)
6e9df6a3 22411+ pax_force_retaddr
22412 retq
22413 .p2align 4
22414 .Lless_16bytes:
6e9df6a3 22415@@ -153,6 +156,7 @@ ENTRY(memcpy)
22416 movq -1*8(%rsi, %rdx), %r9
22417 movq %r8, 0*8(%rdi)
22418 movq %r9, -1*8(%rdi, %rdx)
6e9df6a3 22419+ pax_force_retaddr
22420 retq
22421 .p2align 4
22422 .Lless_8bytes:
6e9df6a3 22423@@ -166,6 +170,7 @@ ENTRY(memcpy)
22424 movl -4(%rsi, %rdx), %r8d
22425 movl %ecx, (%rdi)
22426 movl %r8d, -4(%rdi, %rdx)
6e9df6a3 22427+ pax_force_retaddr
22428 retq
22429 .p2align 4
22430 .Lless_3bytes:
6e9df6a3 22431@@ -183,6 +188,7 @@ ENTRY(memcpy)
22432 jnz .Lloop_1
22433
22434 .Lend:
6e9df6a3 22435+ pax_force_retaddr
22436 retq
22437 CFI_ENDPROC
22438 ENDPROC(memcpy)
22439diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22440index ee16461..c39c199 100644
22441--- a/arch/x86/lib/memmove_64.S
22442+++ b/arch/x86/lib/memmove_64.S
22443@@ -61,13 +61,13 @@ ENTRY(memmove)
22444 5:
22445 sub $0x20, %rdx
22446 movq 0*8(%rsi), %r11
22447- movq 1*8(%rsi), %r10
22448+ movq 1*8(%rsi), %rcx
22449 movq 2*8(%rsi), %r9
22450 movq 3*8(%rsi), %r8
22451 leaq 4*8(%rsi), %rsi
22452
22453 movq %r11, 0*8(%rdi)
22454- movq %r10, 1*8(%rdi)
22455+ movq %rcx, 1*8(%rdi)
22456 movq %r9, 2*8(%rdi)
22457 movq %r8, 3*8(%rdi)
22458 leaq 4*8(%rdi), %rdi
22459@@ -81,10 +81,10 @@ ENTRY(memmove)
22460 4:
22461 movq %rdx, %rcx
22462 movq -8(%rsi, %rdx), %r11
22463- lea -8(%rdi, %rdx), %r10
22464+ lea -8(%rdi, %rdx), %r9
22465 shrq $3, %rcx
22466 rep movsq
22467- movq %r11, (%r10)
22468+ movq %r11, (%r9)
22469 jmp 13f
22470 .Lmemmove_end_forward:
22471
22472@@ -95,14 +95,14 @@ ENTRY(memmove)
22473 7:
22474 movq %rdx, %rcx
22475 movq (%rsi), %r11
22476- movq %rdi, %r10
22477+ movq %rdi, %r9
22478 leaq -8(%rsi, %rdx), %rsi
22479 leaq -8(%rdi, %rdx), %rdi
22480 shrq $3, %rcx
22481 std
22482 rep movsq
22483 cld
22484- movq %r11, (%r10)
22485+ movq %r11, (%r9)
22486 jmp 13f
22487
22488 /*
22489@@ -127,13 +127,13 @@ ENTRY(memmove)
22490 8:
22491 subq $0x20, %rdx
22492 movq -1*8(%rsi), %r11
22493- movq -2*8(%rsi), %r10
22494+ movq -2*8(%rsi), %rcx
22495 movq -3*8(%rsi), %r9
22496 movq -4*8(%rsi), %r8
22497 leaq -4*8(%rsi), %rsi
22498
22499 movq %r11, -1*8(%rdi)
22500- movq %r10, -2*8(%rdi)
22501+ movq %rcx, -2*8(%rdi)
22502 movq %r9, -3*8(%rdi)
22503 movq %r8, -4*8(%rdi)
22504 leaq -4*8(%rdi), %rdi
22505@@ -151,11 +151,11 @@ ENTRY(memmove)
22506 * Move data from 16 bytes to 31 bytes.
22507 */
22508 movq 0*8(%rsi), %r11
22509- movq 1*8(%rsi), %r10
22510+ movq 1*8(%rsi), %rcx
22511 movq -2*8(%rsi, %rdx), %r9
22512 movq -1*8(%rsi, %rdx), %r8
22513 movq %r11, 0*8(%rdi)
22514- movq %r10, 1*8(%rdi)
22515+ movq %rcx, 1*8(%rdi)
22516 movq %r9, -2*8(%rdi, %rdx)
22517 movq %r8, -1*8(%rdi, %rdx)
22518 jmp 13f
22519@@ -167,9 +167,9 @@ ENTRY(memmove)
22520 * Move data from 8 bytes to 15 bytes.
22521 */
22522 movq 0*8(%rsi), %r11
22523- movq -1*8(%rsi, %rdx), %r10
22524+ movq -1*8(%rsi, %rdx), %r9
22525 movq %r11, 0*8(%rdi)
22526- movq %r10, -1*8(%rdi, %rdx)
22527+ movq %r9, -1*8(%rdi, %rdx)
22528 jmp 13f
22529 10:
22530 cmpq $4, %rdx
22531@@ -178,9 +178,9 @@ ENTRY(memmove)
22532 * Move data from 4 bytes to 7 bytes.
22533 */
22534 movl (%rsi), %r11d
22535- movl -4(%rsi, %rdx), %r10d
22536+ movl -4(%rsi, %rdx), %r9d
22537 movl %r11d, (%rdi)
22538- movl %r10d, -4(%rdi, %rdx)
22539+ movl %r9d, -4(%rdi, %rdx)
22540 jmp 13f
22541 11:
22542 cmp $2, %rdx
22543@@ -189,9 +189,9 @@ ENTRY(memmove)
22544 * Move data from 2 bytes to 3 bytes.
22545 */
22546 movw (%rsi), %r11w
22547- movw -2(%rsi, %rdx), %r10w
22548+ movw -2(%rsi, %rdx), %r9w
22549 movw %r11w, (%rdi)
22550- movw %r10w, -2(%rdi, %rdx)
22551+ movw %r9w, -2(%rdi, %rdx)
22552 jmp 13f
22553 12:
22554 cmp $1, %rdx
6e9df6a3 22555@@ -202,6 +202,7 @@ ENTRY(memmove)
22556 movb (%rsi), %r11b
22557 movb %r11b, (%rdi)
22558 13:
6e9df6a3 22559+ pax_force_retaddr
22560 retq
22561 CFI_ENDPROC
22562
6e9df6a3 22563@@ -210,6 +211,7 @@ ENTRY(memmove)
22564 /* Forward moving data. */
22565 movq %rdx, %rcx
22566 rep movsb
6e9df6a3 22567+ pax_force_retaddr
22568 retq
22569 .Lmemmove_end_forward_efs:
22570 .previous
22571diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22572index 79bd454..dff325a 100644
22573--- a/arch/x86/lib/memset_64.S
22574+++ b/arch/x86/lib/memset_64.S
6e9df6a3 22575@@ -31,6 +31,7 @@
22576 movl %r8d,%ecx
22577 rep stosb
22578 movq %r9,%rax
6e9df6a3 22579+ pax_force_retaddr
22580 ret
22581 .Lmemset_e:
22582 .previous
6e9df6a3 22583@@ -53,6 +54,7 @@
22584 movl %edx,%ecx
22585 rep stosb
22586 movq %r9,%rax
6e9df6a3 22587+ pax_force_retaddr
22588 ret
22589 .Lmemset_e_e:
22590 .previous
22591@@ -60,13 +62,13 @@
22592 ENTRY(memset)
22593 ENTRY(__memset)
22594 CFI_STARTPROC
22595- movq %rdi,%r10
22596 movq %rdx,%r11
22597
22598 /* expand byte value */
22599 movzbl %sil,%ecx
22600 movabs $0x0101010101010101,%rax
22601 mul %rcx /* with rax, clobbers rdx */
22602+ movq %rdi,%rdx
22603
22604 /* align dst */
22605 movl %edi,%r9d
22606@@ -120,7 +122,8 @@ ENTRY(__memset)
22607 jnz .Lloop_1
22608
22609 .Lende:
22610- movq %r10,%rax
22611+ movq %rdx,%rax
6e9df6a3 22612+ pax_force_retaddr
22613 ret
22614
22615 CFI_RESTORE_STATE
22616diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22617index c9f2d9b..e7fd2c0 100644
22618--- a/arch/x86/lib/mmx_32.c
22619+++ b/arch/x86/lib/mmx_32.c
22620@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22621 {
22622 void *p;
22623 int i;
22624+ unsigned long cr0;
22625
22626 if (unlikely(in_interrupt()))
22627 return __memcpy(to, from, len);
fe2de317 22628@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22629 kernel_fpu_begin();
22630
22631 __asm__ __volatile__ (
22632- "1: prefetch (%0)\n" /* This set is 28 bytes */
22633- " prefetch 64(%0)\n"
22634- " prefetch 128(%0)\n"
22635- " prefetch 192(%0)\n"
22636- " prefetch 256(%0)\n"
22637+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22638+ " prefetch 64(%1)\n"
22639+ " prefetch 128(%1)\n"
22640+ " prefetch 192(%1)\n"
22641+ " prefetch 256(%1)\n"
22642 "2: \n"
22643 ".section .fixup, \"ax\"\n"
22644- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22645+ "3: \n"
22646+
22647+#ifdef CONFIG_PAX_KERNEXEC
22648+ " movl %%cr0, %0\n"
22649+ " movl %0, %%eax\n"
22650+ " andl $0xFFFEFFFF, %%eax\n"
22651+ " movl %%eax, %%cr0\n"
22652+#endif
22653+
22654+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22655+
22656+#ifdef CONFIG_PAX_KERNEXEC
22657+ " movl %0, %%cr0\n"
22658+#endif
22659+
22660 " jmp 2b\n"
22661 ".previous\n"
22662 _ASM_EXTABLE(1b, 3b)
22663- : : "r" (from));
22664+ : "=&r" (cr0) : "r" (from) : "ax");
22665
22666 for ( ; i > 5; i--) {
22667 __asm__ __volatile__ (
22668- "1: prefetch 320(%0)\n"
22669- "2: movq (%0), %%mm0\n"
22670- " movq 8(%0), %%mm1\n"
22671- " movq 16(%0), %%mm2\n"
22672- " movq 24(%0), %%mm3\n"
22673- " movq %%mm0, (%1)\n"
22674- " movq %%mm1, 8(%1)\n"
22675- " movq %%mm2, 16(%1)\n"
22676- " movq %%mm3, 24(%1)\n"
22677- " movq 32(%0), %%mm0\n"
22678- " movq 40(%0), %%mm1\n"
22679- " movq 48(%0), %%mm2\n"
22680- " movq 56(%0), %%mm3\n"
22681- " movq %%mm0, 32(%1)\n"
22682- " movq %%mm1, 40(%1)\n"
22683- " movq %%mm2, 48(%1)\n"
22684- " movq %%mm3, 56(%1)\n"
22685+ "1: prefetch 320(%1)\n"
22686+ "2: movq (%1), %%mm0\n"
22687+ " movq 8(%1), %%mm1\n"
22688+ " movq 16(%1), %%mm2\n"
22689+ " movq 24(%1), %%mm3\n"
22690+ " movq %%mm0, (%2)\n"
22691+ " movq %%mm1, 8(%2)\n"
22692+ " movq %%mm2, 16(%2)\n"
22693+ " movq %%mm3, 24(%2)\n"
22694+ " movq 32(%1), %%mm0\n"
22695+ " movq 40(%1), %%mm1\n"
22696+ " movq 48(%1), %%mm2\n"
22697+ " movq 56(%1), %%mm3\n"
22698+ " movq %%mm0, 32(%2)\n"
22699+ " movq %%mm1, 40(%2)\n"
22700+ " movq %%mm2, 48(%2)\n"
22701+ " movq %%mm3, 56(%2)\n"
22702 ".section .fixup, \"ax\"\n"
22703- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22704+ "3:\n"
22705+
22706+#ifdef CONFIG_PAX_KERNEXEC
22707+ " movl %%cr0, %0\n"
22708+ " movl %0, %%eax\n"
22709+ " andl $0xFFFEFFFF, %%eax\n"
22710+ " movl %%eax, %%cr0\n"
22711+#endif
22712+
22713+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22714+
22715+#ifdef CONFIG_PAX_KERNEXEC
22716+ " movl %0, %%cr0\n"
22717+#endif
22718+
22719 " jmp 2b\n"
22720 ".previous\n"
22721 _ASM_EXTABLE(1b, 3b)
22722- : : "r" (from), "r" (to) : "memory");
22723+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22724
22725 from += 64;
22726 to += 64;
22727@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22728 static void fast_copy_page(void *to, void *from)
22729 {
22730 int i;
22731+ unsigned long cr0;
22732
22733 kernel_fpu_begin();
22734
fe2de317 22735@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22736 * but that is for later. -AV
22737 */
22738 __asm__ __volatile__(
22739- "1: prefetch (%0)\n"
22740- " prefetch 64(%0)\n"
22741- " prefetch 128(%0)\n"
22742- " prefetch 192(%0)\n"
22743- " prefetch 256(%0)\n"
22744+ "1: prefetch (%1)\n"
22745+ " prefetch 64(%1)\n"
22746+ " prefetch 128(%1)\n"
22747+ " prefetch 192(%1)\n"
22748+ " prefetch 256(%1)\n"
22749 "2: \n"
22750 ".section .fixup, \"ax\"\n"
22751- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22752+ "3: \n"
22753+
22754+#ifdef CONFIG_PAX_KERNEXEC
22755+ " movl %%cr0, %0\n"
22756+ " movl %0, %%eax\n"
22757+ " andl $0xFFFEFFFF, %%eax\n"
22758+ " movl %%eax, %%cr0\n"
22759+#endif
22760+
22761+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22762+
22763+#ifdef CONFIG_PAX_KERNEXEC
22764+ " movl %0, %%cr0\n"
22765+#endif
22766+
22767 " jmp 2b\n"
22768 ".previous\n"
22769- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22770+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22771
22772 for (i = 0; i < (4096-320)/64; i++) {
22773 __asm__ __volatile__ (
22774- "1: prefetch 320(%0)\n"
22775- "2: movq (%0), %%mm0\n"
22776- " movntq %%mm0, (%1)\n"
22777- " movq 8(%0), %%mm1\n"
22778- " movntq %%mm1, 8(%1)\n"
22779- " movq 16(%0), %%mm2\n"
22780- " movntq %%mm2, 16(%1)\n"
22781- " movq 24(%0), %%mm3\n"
22782- " movntq %%mm3, 24(%1)\n"
22783- " movq 32(%0), %%mm4\n"
22784- " movntq %%mm4, 32(%1)\n"
22785- " movq 40(%0), %%mm5\n"
22786- " movntq %%mm5, 40(%1)\n"
22787- " movq 48(%0), %%mm6\n"
22788- " movntq %%mm6, 48(%1)\n"
22789- " movq 56(%0), %%mm7\n"
22790- " movntq %%mm7, 56(%1)\n"
22791+ "1: prefetch 320(%1)\n"
22792+ "2: movq (%1), %%mm0\n"
22793+ " movntq %%mm0, (%2)\n"
22794+ " movq 8(%1), %%mm1\n"
22795+ " movntq %%mm1, 8(%2)\n"
22796+ " movq 16(%1), %%mm2\n"
22797+ " movntq %%mm2, 16(%2)\n"
22798+ " movq 24(%1), %%mm3\n"
22799+ " movntq %%mm3, 24(%2)\n"
22800+ " movq 32(%1), %%mm4\n"
22801+ " movntq %%mm4, 32(%2)\n"
22802+ " movq 40(%1), %%mm5\n"
22803+ " movntq %%mm5, 40(%2)\n"
22804+ " movq 48(%1), %%mm6\n"
22805+ " movntq %%mm6, 48(%2)\n"
22806+ " movq 56(%1), %%mm7\n"
22807+ " movntq %%mm7, 56(%2)\n"
22808 ".section .fixup, \"ax\"\n"
22809- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22810+ "3:\n"
22811+
22812+#ifdef CONFIG_PAX_KERNEXEC
22813+ " movl %%cr0, %0\n"
22814+ " movl %0, %%eax\n"
22815+ " andl $0xFFFEFFFF, %%eax\n"
22816+ " movl %%eax, %%cr0\n"
22817+#endif
22818+
22819+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22820+
22821+#ifdef CONFIG_PAX_KERNEXEC
22822+ " movl %0, %%cr0\n"
22823+#endif
22824+
22825 " jmp 2b\n"
22826 ".previous\n"
22827- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22828+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22829
22830 from += 64;
22831 to += 64;
22832@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22833 static void fast_copy_page(void *to, void *from)
22834 {
22835 int i;
22836+ unsigned long cr0;
22837
22838 kernel_fpu_begin();
22839
22840 __asm__ __volatile__ (
22841- "1: prefetch (%0)\n"
22842- " prefetch 64(%0)\n"
22843- " prefetch 128(%0)\n"
22844- " prefetch 192(%0)\n"
22845- " prefetch 256(%0)\n"
22846+ "1: prefetch (%1)\n"
22847+ " prefetch 64(%1)\n"
22848+ " prefetch 128(%1)\n"
22849+ " prefetch 192(%1)\n"
22850+ " prefetch 256(%1)\n"
22851 "2: \n"
22852 ".section .fixup, \"ax\"\n"
22853- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22854+ "3: \n"
22855+
22856+#ifdef CONFIG_PAX_KERNEXEC
22857+ " movl %%cr0, %0\n"
22858+ " movl %0, %%eax\n"
22859+ " andl $0xFFFEFFFF, %%eax\n"
22860+ " movl %%eax, %%cr0\n"
22861+#endif
22862+
22863+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22864+
22865+#ifdef CONFIG_PAX_KERNEXEC
22866+ " movl %0, %%cr0\n"
22867+#endif
22868+
22869 " jmp 2b\n"
22870 ".previous\n"
22871- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22872+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22873
22874 for (i = 0; i < 4096/64; i++) {
22875 __asm__ __volatile__ (
22876- "1: prefetch 320(%0)\n"
22877- "2: movq (%0), %%mm0\n"
22878- " movq 8(%0), %%mm1\n"
22879- " movq 16(%0), %%mm2\n"
22880- " movq 24(%0), %%mm3\n"
22881- " movq %%mm0, (%1)\n"
22882- " movq %%mm1, 8(%1)\n"
22883- " movq %%mm2, 16(%1)\n"
22884- " movq %%mm3, 24(%1)\n"
22885- " movq 32(%0), %%mm0\n"
22886- " movq 40(%0), %%mm1\n"
22887- " movq 48(%0), %%mm2\n"
22888- " movq 56(%0), %%mm3\n"
22889- " movq %%mm0, 32(%1)\n"
22890- " movq %%mm1, 40(%1)\n"
22891- " movq %%mm2, 48(%1)\n"
22892- " movq %%mm3, 56(%1)\n"
22893+ "1: prefetch 320(%1)\n"
22894+ "2: movq (%1), %%mm0\n"
22895+ " movq 8(%1), %%mm1\n"
22896+ " movq 16(%1), %%mm2\n"
22897+ " movq 24(%1), %%mm3\n"
22898+ " movq %%mm0, (%2)\n"
22899+ " movq %%mm1, 8(%2)\n"
22900+ " movq %%mm2, 16(%2)\n"
22901+ " movq %%mm3, 24(%2)\n"
22902+ " movq 32(%1), %%mm0\n"
22903+ " movq 40(%1), %%mm1\n"
22904+ " movq 48(%1), %%mm2\n"
22905+ " movq 56(%1), %%mm3\n"
22906+ " movq %%mm0, 32(%2)\n"
22907+ " movq %%mm1, 40(%2)\n"
22908+ " movq %%mm2, 48(%2)\n"
22909+ " movq %%mm3, 56(%2)\n"
22910 ".section .fixup, \"ax\"\n"
22911- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22912+ "3:\n"
22913+
22914+#ifdef CONFIG_PAX_KERNEXEC
22915+ " movl %%cr0, %0\n"
22916+ " movl %0, %%eax\n"
22917+ " andl $0xFFFEFFFF, %%eax\n"
22918+ " movl %%eax, %%cr0\n"
22919+#endif
22920+
22921+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22922+
22923+#ifdef CONFIG_PAX_KERNEXEC
22924+ " movl %0, %%cr0\n"
22925+#endif
22926+
22927 " jmp 2b\n"
22928 ".previous\n"
22929 _ASM_EXTABLE(1b, 3b)
22930- : : "r" (from), "r" (to) : "memory");
22931+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22932
22933 from += 64;
22934 to += 64;
fe2de317
MT
22935diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22936index 69fa106..adda88b 100644
22937--- a/arch/x86/lib/msr-reg.S
22938+++ b/arch/x86/lib/msr-reg.S
22939@@ -3,6 +3,7 @@
22940 #include <asm/dwarf2.h>
22941 #include <asm/asm.h>
22942 #include <asm/msr.h>
22943+#include <asm/alternative-asm.h>
22944
22945 #ifdef CONFIG_X86_64
22946 /*
22947@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22948 CFI_STARTPROC
22949 pushq_cfi %rbx
22950 pushq_cfi %rbp
22951- movq %rdi, %r10 /* Save pointer */
22952+ movq %rdi, %r9 /* Save pointer */
22953 xorl %r11d, %r11d /* Return value */
22954 movl (%rdi), %eax
22955 movl 4(%rdi), %ecx
22956@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22957 movl 28(%rdi), %edi
22958 CFI_REMEMBER_STATE
22959 1: \op
22960-2: movl %eax, (%r10)
22961+2: movl %eax, (%r9)
22962 movl %r11d, %eax /* Return value */
22963- movl %ecx, 4(%r10)
22964- movl %edx, 8(%r10)
22965- movl %ebx, 12(%r10)
22966- movl %ebp, 20(%r10)
22967- movl %esi, 24(%r10)
22968- movl %edi, 28(%r10)
22969+ movl %ecx, 4(%r9)
22970+ movl %edx, 8(%r9)
22971+ movl %ebx, 12(%r9)
22972+ movl %ebp, 20(%r9)
22973+ movl %esi, 24(%r9)
22974+ movl %edi, 28(%r9)
22975 popq_cfi %rbp
22976 popq_cfi %rbx
22977+ pax_force_retaddr
22978 ret
22979 3:
22980 CFI_RESTORE_STATE
22981diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22982index 36b0d15..d381858 100644
22983--- a/arch/x86/lib/putuser.S
22984+++ b/arch/x86/lib/putuser.S
6e9df6a3 22985@@ -15,7 +15,9 @@
22986 #include <asm/thread_info.h>
22987 #include <asm/errno.h>
22988 #include <asm/asm.h>
df50ba0c 22989-
58c5fc13 22990+#include <asm/segment.h>
df50ba0c 22991+#include <asm/pgtable.h>
6e9df6a3 22992+#include <asm/alternative-asm.h>
22993
22994 /*
df50ba0c 22995 * __put_user_X
6e9df6a3 22996@@ -29,52 +31,119 @@
22997 * as they get called from within inline assembly.
22998 */
22999
23000-#define ENTER CFI_STARTPROC ; \
23001- GET_THREAD_INFO(%_ASM_BX)
6e9df6a3 23002-#define EXIT ret ; \
ae4e228f 23003+#define ENTER CFI_STARTPROC
6e9df6a3 23004+#define EXIT pax_force_retaddr; ret ; \
23005 CFI_ENDPROC
23006
23007+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23008+#define _DEST %_ASM_CX,%_ASM_BX
23009+#else
23010+#define _DEST %_ASM_CX
23011+#endif
23012+
23013+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16454cff 23014+#define __copyuser_seg gs;
23015+#else
23016+#define __copyuser_seg
23017+#endif
57199397 23018+
23019 .text
23020 ENTRY(__put_user_1)
58c5fc13 23021 ENTER
58c5fc13 23022+
bc901d79 23023+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23024+ GET_THREAD_INFO(%_ASM_BX)
23025 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23026 jae bad_put_user
57199397 23027-1: movb %al,(%_ASM_CX)
23028+
23029+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23030+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23031+ cmp %_ASM_BX,%_ASM_CX
23032+ jb 1234f
23033+ xor %ebx,%ebx
23034+1234:
23035+#endif
23036+
23037+#endif
23038+
16454cff 23039+1: __copyuser_seg movb %al,(_DEST)
23040 xor %eax,%eax
23041 EXIT
23042 ENDPROC(__put_user_1)
23043
23044 ENTRY(__put_user_2)
23045 ENTER
58c5fc13 23046+
bc901d79 23047+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23048+ GET_THREAD_INFO(%_ASM_BX)
23049 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23050 sub $1,%_ASM_BX
23051 cmp %_ASM_BX,%_ASM_CX
23052 jae bad_put_user
57199397 23053-2: movw %ax,(%_ASM_CX)
23054+
23055+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23056+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23057+ cmp %_ASM_BX,%_ASM_CX
23058+ jb 1234f
23059+ xor %ebx,%ebx
23060+1234:
23061+#endif
23062+
23063+#endif
23064+
16454cff 23065+2: __copyuser_seg movw %ax,(_DEST)
23066 xor %eax,%eax
23067 EXIT
23068 ENDPROC(__put_user_2)
23069
23070 ENTRY(__put_user_4)
23071 ENTER
58c5fc13 23072+
bc901d79 23073+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23074+ GET_THREAD_INFO(%_ASM_BX)
23075 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23076 sub $3,%_ASM_BX
23077 cmp %_ASM_BX,%_ASM_CX
23078 jae bad_put_user
57199397 23079-3: movl %eax,(%_ASM_CX)
23080+
23081+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23082+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23083+ cmp %_ASM_BX,%_ASM_CX
23084+ jb 1234f
23085+ xor %ebx,%ebx
23086+1234:
23087+#endif
23088+
23089+#endif
23090+
16454cff 23091+3: __copyuser_seg movl %eax,(_DEST)
23092 xor %eax,%eax
23093 EXIT
23094 ENDPROC(__put_user_4)
23095
23096 ENTRY(__put_user_8)
23097 ENTER
58c5fc13 23098+
bc901d79 23099+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23100+ GET_THREAD_INFO(%_ASM_BX)
23101 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23102 sub $7,%_ASM_BX
23103 cmp %_ASM_BX,%_ASM_CX
23104 jae bad_put_user
57199397 23105-4: mov %_ASM_AX,(%_ASM_CX)
23106+
23107+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23108+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23109+ cmp %_ASM_BX,%_ASM_CX
23110+ jb 1234f
23111+ xor %ebx,%ebx
23112+1234:
23113+#endif
23114+
23115+#endif
23116+
16454cff 23117+4: __copyuser_seg mov %_ASM_AX,(_DEST)
58c5fc13 23118 #ifdef CONFIG_X86_32
57199397 23119-5: movl %edx,4(%_ASM_CX)
16454cff 23120+5: __copyuser_seg movl %edx,4(_DEST)
58c5fc13 23121 #endif
23122 xor %eax,%eax
23123 EXIT
23124diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
23125index 1cad221..de671ee 100644
23126--- a/arch/x86/lib/rwlock.S
23127+++ b/arch/x86/lib/rwlock.S
23128@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
23129 FRAME
23130 0: LOCK_PREFIX
23131 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23132+
23133+#ifdef CONFIG_PAX_REFCOUNT
23134+ jno 1234f
23135+ LOCK_PREFIX
23136+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23137+ int $4
23138+1234:
23139+ _ASM_EXTABLE(1234b, 1234b)
23140+#endif
23141+
23142 1: rep; nop
23143 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
23144 jne 1b
23145 LOCK_PREFIX
6e9df6a3 23146 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23147+
23148+#ifdef CONFIG_PAX_REFCOUNT
23149+ jno 1234f
23150+ LOCK_PREFIX
23151+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23152+ int $4
23153+1234:
23154+ _ASM_EXTABLE(1234b, 1234b)
23155+#endif
23156+
23157 jnz 0b
23158 ENDFRAME
23159+ pax_force_retaddr
23160 ret
23161 CFI_ENDPROC
23162 END(__write_lock_failed)
23163@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
23164 FRAME
23165 0: LOCK_PREFIX
23166 READ_LOCK_SIZE(inc) (%__lock_ptr)
23167+
23168+#ifdef CONFIG_PAX_REFCOUNT
23169+ jno 1234f
23170+ LOCK_PREFIX
23171+ READ_LOCK_SIZE(dec) (%__lock_ptr)
23172+ int $4
23173+1234:
23174+ _ASM_EXTABLE(1234b, 1234b)
23175+#endif
23176+
23177 1: rep; nop
23178 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
23179 js 1b
23180 LOCK_PREFIX
6e9df6a3 23181 READ_LOCK_SIZE(dec) (%__lock_ptr)
23182+
23183+#ifdef CONFIG_PAX_REFCOUNT
23184+ jno 1234f
23185+ LOCK_PREFIX
23186+ READ_LOCK_SIZE(inc) (%__lock_ptr)
23187+ int $4
23188+1234:
23189+ _ASM_EXTABLE(1234b, 1234b)
23190+#endif
23191+
23192 js 0b
23193 ENDFRAME
23194+ pax_force_retaddr
23195 ret
23196 CFI_ENDPROC
23197 END(__read_lock_failed)
23198diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
23199index 5dff5f0..cadebf4 100644
23200--- a/arch/x86/lib/rwsem.S
23201+++ b/arch/x86/lib/rwsem.S
23202@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
23203 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23204 CFI_RESTORE __ASM_REG(dx)
15a11c5b 23205 restore_common_regs
6e9df6a3 23206+ pax_force_retaddr
23207 ret
23208 CFI_ENDPROC
23209 ENDPROC(call_rwsem_down_read_failed)
6e9df6a3 23210@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
23211 movq %rax,%rdi
23212 call rwsem_down_write_failed
23213 restore_common_regs
6e9df6a3 23214+ pax_force_retaddr
23215 ret
23216 CFI_ENDPROC
23217 ENDPROC(call_rwsem_down_write_failed)
6e9df6a3 23218@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
23219 movq %rax,%rdi
23220 call rwsem_wake
23221 restore_common_regs
23222-1: ret
23223+1: pax_force_retaddr
23224+ ret
23225 CFI_ENDPROC
23226 ENDPROC(call_rwsem_wake)
23227
23228@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
23229 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23230 CFI_RESTORE __ASM_REG(dx)
15a11c5b 23231 restore_common_regs
6e9df6a3 23232+ pax_force_retaddr
23233 ret
23234 CFI_ENDPROC
23235 ENDPROC(call_rwsem_downgrade_wake)
23236diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23237index a63efd6..ccecad8 100644
23238--- a/arch/x86/lib/thunk_64.S
23239+++ b/arch/x86/lib/thunk_64.S
23240@@ -8,6 +8,7 @@
23241 #include <linux/linkage.h>
23242 #include <asm/dwarf2.h>
23243 #include <asm/calling.h>
23244+#include <asm/alternative-asm.h>
23245
23246 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23247 .macro THUNK name, func, put_ret_addr_in_rdi=0
23248@@ -41,5 +42,6 @@
23249 SAVE_ARGS
23250 restore:
23251 RESTORE_ARGS
23252+ pax_force_retaddr
23253 ret
15a11c5b 23254 CFI_ENDPROC
fe2de317 23255diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
4c928ab7 23256index e218d5d..a99a1eb 100644
23257--- a/arch/x86/lib/usercopy_32.c
23258+++ b/arch/x86/lib/usercopy_32.c
23259@@ -43,7 +43,7 @@ do { \
23260 __asm__ __volatile__( \
23261 " testl %1,%1\n" \
23262 " jz 2f\n" \
58c5fc13 23263- "0: lodsb\n" \
16454cff 23264+ "0: "__copyuser_seg"lodsb\n" \
23265 " stosb\n" \
23266 " testb %%al,%%al\n" \
23267 " jz 1f\n" \
23268@@ -128,10 +128,12 @@ do { \
23269 int __d0; \
23270 might_fault(); \
23271 __asm__ __volatile__( \
23272+ __COPYUSER_SET_ES \
23273 "0: rep; stosl\n" \
23274 " movl %2,%0\n" \
23275 "1: rep; stosb\n" \
23276 "2:\n" \
23277+ __COPYUSER_RESTORE_ES \
23278 ".section .fixup,\"ax\"\n" \
23279 "3: lea 0(%2,%0,4),%0\n" \
23280 " jmp 2b\n" \
fe2de317 23281@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23282 might_fault();
23283
23284 __asm__ __volatile__(
bc901d79 23285+ __COPYUSER_SET_ES
23286 " testl %0, %0\n"
23287 " jz 3f\n"
bc901d79 23288 " andl %0,%%ecx\n"
fe2de317 23289@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23290 " subl %%ecx,%0\n"
23291 " addl %0,%%eax\n"
23292 "1:\n"
bc901d79 23293+ __COPYUSER_RESTORE_ES
23294 ".section .fixup,\"ax\"\n"
23295 "2: xorl %%eax,%%eax\n"
23296 " jmp 1b\n"
bc901d79 23297@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23298
23299 #ifdef CONFIG_X86_INTEL_USERCOPY
23300 static unsigned long
23301-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23302+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23303 {
23304 int d0, d1;
23305 __asm__ __volatile__(
fe2de317 23306@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23307 " .align 2,0x90\n"
23308 "3: movl 0(%4), %%eax\n"
23309 "4: movl 4(%4), %%edx\n"
23310- "5: movl %%eax, 0(%3)\n"
23311- "6: movl %%edx, 4(%3)\n"
23312+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23313+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23314 "7: movl 8(%4), %%eax\n"
23315 "8: movl 12(%4),%%edx\n"
23316- "9: movl %%eax, 8(%3)\n"
23317- "10: movl %%edx, 12(%3)\n"
23318+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23319+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23320 "11: movl 16(%4), %%eax\n"
23321 "12: movl 20(%4), %%edx\n"
23322- "13: movl %%eax, 16(%3)\n"
23323- "14: movl %%edx, 20(%3)\n"
23324+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23325+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23326 "15: movl 24(%4), %%eax\n"
23327 "16: movl 28(%4), %%edx\n"
23328- "17: movl %%eax, 24(%3)\n"
23329- "18: movl %%edx, 28(%3)\n"
23330+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23331+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23332 "19: movl 32(%4), %%eax\n"
23333 "20: movl 36(%4), %%edx\n"
23334- "21: movl %%eax, 32(%3)\n"
23335- "22: movl %%edx, 36(%3)\n"
23336+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23337+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23338 "23: movl 40(%4), %%eax\n"
23339 "24: movl 44(%4), %%edx\n"
23340- "25: movl %%eax, 40(%3)\n"
23341- "26: movl %%edx, 44(%3)\n"
23342+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23343+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23344 "27: movl 48(%4), %%eax\n"
23345 "28: movl 52(%4), %%edx\n"
23346- "29: movl %%eax, 48(%3)\n"
23347- "30: movl %%edx, 52(%3)\n"
23348+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23349+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23350 "31: movl 56(%4), %%eax\n"
23351 "32: movl 60(%4), %%edx\n"
23352- "33: movl %%eax, 56(%3)\n"
23353- "34: movl %%edx, 60(%3)\n"
23354+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23355+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23356 " addl $-64, %0\n"
23357 " addl $64, %4\n"
23358 " addl $64, %3\n"
4c928ab7 23359@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23360 " shrl $2, %0\n"
23361 " andl $3, %%eax\n"
23362 " cld\n"
23363+ __COPYUSER_SET_ES
23364 "99: rep; movsl\n"
23365 "36: movl %%eax, %0\n"
23366 "37: rep; movsb\n"
23367 "100:\n"
23368+ __COPYUSER_RESTORE_ES
23369 ".section .fixup,\"ax\"\n"
23370 "101: lea 0(%%eax,%0,4),%0\n"
23371 " jmp 100b\n"
23372@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23373 }
23374
23375 static unsigned long
58c5fc13 23376+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
bc901d79
MT
23377+{
23378+ int d0, d1;
23379+ __asm__ __volatile__(
23380+ " .align 2,0x90\n"
16454cff 23381+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
bc901d79
MT
23382+ " cmpl $67, %0\n"
23383+ " jbe 3f\n"
16454cff 23384+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
bc901d79 23385+ " .align 2,0x90\n"
16454cff
MT
23386+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23387+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23388+ "5: movl %%eax, 0(%3)\n"
23389+ "6: movl %%edx, 4(%3)\n"
16454cff
MT
23390+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23391+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23392+ "9: movl %%eax, 8(%3)\n"
23393+ "10: movl %%edx, 12(%3)\n"
16454cff
MT
23394+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23395+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23396+ "13: movl %%eax, 16(%3)\n"
23397+ "14: movl %%edx, 20(%3)\n"
16454cff
MT
23398+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23399+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23400+ "17: movl %%eax, 24(%3)\n"
23401+ "18: movl %%edx, 28(%3)\n"
16454cff
MT
23402+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23403+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23404+ "21: movl %%eax, 32(%3)\n"
23405+ "22: movl %%edx, 36(%3)\n"
16454cff
MT
23406+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23407+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23408+ "25: movl %%eax, 40(%3)\n"
23409+ "26: movl %%edx, 44(%3)\n"
16454cff
MT
23410+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23411+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23412+ "29: movl %%eax, 48(%3)\n"
23413+ "30: movl %%edx, 52(%3)\n"
16454cff
MT
23414+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23415+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23416+ "33: movl %%eax, 56(%3)\n"
23417+ "34: movl %%edx, 60(%3)\n"
23418+ " addl $-64, %0\n"
23419+ " addl $64, %4\n"
23420+ " addl $64, %3\n"
23421+ " cmpl $63, %0\n"
23422+ " ja 1b\n"
23423+ "35: movl %0, %%eax\n"
23424+ " shrl $2, %0\n"
23425+ " andl $3, %%eax\n"
23426+ " cld\n"
16454cff 23427+ "99: rep; "__copyuser_seg" movsl\n"
bc901d79 23428+ "36: movl %%eax, %0\n"
16454cff 23429+ "37: rep; "__copyuser_seg" movsb\n"
bc901d79 23430+ "100:\n"
4c928ab7
MT
23431+ ".section .fixup,\"ax\"\n"
23432+ "101: lea 0(%%eax,%0,4),%0\n"
23433+ " jmp 100b\n"
23434+ ".previous\n"
23435+ ".section __ex_table,\"a\"\n"
23436+ " .align 4\n"
23437+ " .long 1b,100b\n"
23438+ " .long 2b,100b\n"
23439+ " .long 3b,100b\n"
23440+ " .long 4b,100b\n"
23441+ " .long 5b,100b\n"
23442+ " .long 6b,100b\n"
23443+ " .long 7b,100b\n"
23444+ " .long 8b,100b\n"
23445+ " .long 9b,100b\n"
23446+ " .long 10b,100b\n"
23447+ " .long 11b,100b\n"
23448+ " .long 12b,100b\n"
23449+ " .long 13b,100b\n"
23450+ " .long 14b,100b\n"
23451+ " .long 15b,100b\n"
23452+ " .long 16b,100b\n"
23453+ " .long 17b,100b\n"
23454+ " .long 18b,100b\n"
23455+ " .long 19b,100b\n"
23456+ " .long 20b,100b\n"
23457+ " .long 21b,100b\n"
23458+ " .long 22b,100b\n"
23459+ " .long 23b,100b\n"
23460+ " .long 24b,100b\n"
23461+ " .long 25b,100b\n"
23462+ " .long 26b,100b\n"
23463+ " .long 27b,100b\n"
23464+ " .long 28b,100b\n"
23465+ " .long 29b,100b\n"
23466+ " .long 30b,100b\n"
23467+ " .long 31b,100b\n"
23468+ " .long 32b,100b\n"
23469+ " .long 33b,100b\n"
23470+ " .long 34b,100b\n"
23471+ " .long 35b,100b\n"
23472+ " .long 36b,100b\n"
23473+ " .long 37b,100b\n"
23474+ " .long 99b,101b\n"
23475+ ".previous"
23476+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23477+ : "1"(to), "2"(from), "0"(size)
23478+ : "eax", "edx", "memory");
23479+ return size;
23480+}
23481+
23482+static unsigned long
23483+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23484+static unsigned long
23485 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23486 {
58c5fc13
MT
23487 int d0, d1;
23488 __asm__ __volatile__(
58c5fc13 23489 " .align 2,0x90\n"
bc901d79 23490- "0: movl 32(%4), %%eax\n"
16454cff 23491+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23492 " cmpl $67, %0\n"
bc901d79
MT
23493 " jbe 2f\n"
23494- "1: movl 64(%4), %%eax\n"
16454cff 23495+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23496 " .align 2,0x90\n"
bc901d79
MT
23497- "2: movl 0(%4), %%eax\n"
23498- "21: movl 4(%4), %%edx\n"
16454cff
MT
23499+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23500+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23501 " movl %%eax, 0(%3)\n"
23502 " movl %%edx, 4(%3)\n"
23503- "3: movl 8(%4), %%eax\n"
23504- "31: movl 12(%4),%%edx\n"
16454cff
MT
23505+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23506+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23507 " movl %%eax, 8(%3)\n"
23508 " movl %%edx, 12(%3)\n"
23509- "4: movl 16(%4), %%eax\n"
23510- "41: movl 20(%4), %%edx\n"
16454cff
MT
23511+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23512+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23513 " movl %%eax, 16(%3)\n"
23514 " movl %%edx, 20(%3)\n"
23515- "10: movl 24(%4), %%eax\n"
23516- "51: movl 28(%4), %%edx\n"
16454cff
MT
23517+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23518+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23519 " movl %%eax, 24(%3)\n"
23520 " movl %%edx, 28(%3)\n"
23521- "11: movl 32(%4), %%eax\n"
23522- "61: movl 36(%4), %%edx\n"
16454cff
MT
23523+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23524+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23525 " movl %%eax, 32(%3)\n"
23526 " movl %%edx, 36(%3)\n"
23527- "12: movl 40(%4), %%eax\n"
23528- "71: movl 44(%4), %%edx\n"
16454cff
MT
23529+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23530+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23531 " movl %%eax, 40(%3)\n"
23532 " movl %%edx, 44(%3)\n"
23533- "13: movl 48(%4), %%eax\n"
23534- "81: movl 52(%4), %%edx\n"
16454cff
MT
23535+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23536+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23537 " movl %%eax, 48(%3)\n"
23538 " movl %%edx, 52(%3)\n"
23539- "14: movl 56(%4), %%eax\n"
23540- "91: movl 60(%4), %%edx\n"
16454cff
MT
23541+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23542+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23543 " movl %%eax, 56(%3)\n"
23544 " movl %%edx, 60(%3)\n"
58c5fc13 23545 " addl $-64, %0\n"
4c928ab7 23546@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
bc901d79
MT
23547 " shrl $2, %0\n"
23548 " andl $3, %%eax\n"
23549 " cld\n"
23550- "6: rep; movsl\n"
16454cff 23551+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23552 " movl %%eax,%0\n"
bc901d79 23553- "7: rep; movsb\n"
16454cff 23554+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23555 "8:\n"
58c5fc13
MT
23556 ".section .fixup,\"ax\"\n"
23557 "9: lea 0(%%eax,%0,4),%0\n"
4c928ab7
MT
23558@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23559 */
23560
23561 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23562+ const void __user *from, unsigned long size) __size_overflow(3);
23563+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23564 const void __user *from, unsigned long size)
23565 {
23566 int d0, d1;
58c5fc13
MT
23567
23568 __asm__ __volatile__(
58c5fc13 23569 " .align 2,0x90\n"
bc901d79 23570- "0: movl 32(%4), %%eax\n"
16454cff 23571+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23572 " cmpl $67, %0\n"
bc901d79
MT
23573 " jbe 2f\n"
23574- "1: movl 64(%4), %%eax\n"
16454cff 23575+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23576 " .align 2,0x90\n"
bc901d79
MT
23577- "2: movl 0(%4), %%eax\n"
23578- "21: movl 4(%4), %%edx\n"
16454cff
MT
23579+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23580+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23581 " movnti %%eax, 0(%3)\n"
23582 " movnti %%edx, 4(%3)\n"
23583- "3: movl 8(%4), %%eax\n"
23584- "31: movl 12(%4),%%edx\n"
16454cff
MT
23585+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23586+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23587 " movnti %%eax, 8(%3)\n"
23588 " movnti %%edx, 12(%3)\n"
23589- "4: movl 16(%4), %%eax\n"
23590- "41: movl 20(%4), %%edx\n"
16454cff
MT
23591+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23592+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23593 " movnti %%eax, 16(%3)\n"
23594 " movnti %%edx, 20(%3)\n"
23595- "10: movl 24(%4), %%eax\n"
23596- "51: movl 28(%4), %%edx\n"
16454cff
MT
23597+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23598+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23599 " movnti %%eax, 24(%3)\n"
23600 " movnti %%edx, 28(%3)\n"
23601- "11: movl 32(%4), %%eax\n"
23602- "61: movl 36(%4), %%edx\n"
16454cff
MT
23603+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23604+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23605 " movnti %%eax, 32(%3)\n"
23606 " movnti %%edx, 36(%3)\n"
23607- "12: movl 40(%4), %%eax\n"
23608- "71: movl 44(%4), %%edx\n"
16454cff
MT
23609+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23610+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23611 " movnti %%eax, 40(%3)\n"
23612 " movnti %%edx, 44(%3)\n"
23613- "13: movl 48(%4), %%eax\n"
23614- "81: movl 52(%4), %%edx\n"
16454cff
MT
23615+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23616+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23617 " movnti %%eax, 48(%3)\n"
23618 " movnti %%edx, 52(%3)\n"
23619- "14: movl 56(%4), %%eax\n"
23620- "91: movl 60(%4), %%edx\n"
16454cff
MT
23621+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23622+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23623 " movnti %%eax, 56(%3)\n"
23624 " movnti %%edx, 60(%3)\n"
58c5fc13 23625 " addl $-64, %0\n"
4c928ab7 23626@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
bc901d79
MT
23627 " shrl $2, %0\n"
23628 " andl $3, %%eax\n"
23629 " cld\n"
23630- "6: rep; movsl\n"
16454cff 23631+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23632 " movl %%eax,%0\n"
bc901d79 23633- "7: rep; movsb\n"
16454cff 23634+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23635 "8:\n"
58c5fc13
MT
23636 ".section .fixup,\"ax\"\n"
23637 "9: lea 0(%%eax,%0,4),%0\n"
4c928ab7
MT
23638@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23639 }
23640
23641 static unsigned long __copy_user_intel_nocache(void *to,
23642+ const void __user *from, unsigned long size) __size_overflow(3);
23643+static unsigned long __copy_user_intel_nocache(void *to,
23644 const void __user *from, unsigned long size)
23645 {
23646 int d0, d1;
58c5fc13
MT
23647
23648 __asm__ __volatile__(
58c5fc13 23649 " .align 2,0x90\n"
bc901d79 23650- "0: movl 32(%4), %%eax\n"
16454cff 23651+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23652 " cmpl $67, %0\n"
bc901d79
MT
23653 " jbe 2f\n"
23654- "1: movl 64(%4), %%eax\n"
16454cff 23655+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23656 " .align 2,0x90\n"
bc901d79
MT
23657- "2: movl 0(%4), %%eax\n"
23658- "21: movl 4(%4), %%edx\n"
16454cff
MT
23659+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23660+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23661 " movnti %%eax, 0(%3)\n"
23662 " movnti %%edx, 4(%3)\n"
23663- "3: movl 8(%4), %%eax\n"
23664- "31: movl 12(%4),%%edx\n"
16454cff
MT
23665+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23666+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23667 " movnti %%eax, 8(%3)\n"
23668 " movnti %%edx, 12(%3)\n"
23669- "4: movl 16(%4), %%eax\n"
23670- "41: movl 20(%4), %%edx\n"
16454cff
MT
23671+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23672+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23673 " movnti %%eax, 16(%3)\n"
23674 " movnti %%edx, 20(%3)\n"
23675- "10: movl 24(%4), %%eax\n"
23676- "51: movl 28(%4), %%edx\n"
16454cff
MT
23677+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23678+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23679 " movnti %%eax, 24(%3)\n"
23680 " movnti %%edx, 28(%3)\n"
23681- "11: movl 32(%4), %%eax\n"
23682- "61: movl 36(%4), %%edx\n"
16454cff
MT
23683+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23684+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23685 " movnti %%eax, 32(%3)\n"
23686 " movnti %%edx, 36(%3)\n"
23687- "12: movl 40(%4), %%eax\n"
23688- "71: movl 44(%4), %%edx\n"
16454cff
MT
23689+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23690+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23691 " movnti %%eax, 40(%3)\n"
23692 " movnti %%edx, 44(%3)\n"
23693- "13: movl 48(%4), %%eax\n"
23694- "81: movl 52(%4), %%edx\n"
16454cff
MT
23695+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23696+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23697 " movnti %%eax, 48(%3)\n"
23698 " movnti %%edx, 52(%3)\n"
23699- "14: movl 56(%4), %%eax\n"
23700- "91: movl 60(%4), %%edx\n"
16454cff
MT
23701+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23702+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23703 " movnti %%eax, 56(%3)\n"
23704 " movnti %%edx, 60(%3)\n"
58c5fc13 23705 " addl $-64, %0\n"
4c928ab7 23706@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
bc901d79
MT
23707 " shrl $2, %0\n"
23708 " andl $3, %%eax\n"
23709 " cld\n"
23710- "6: rep; movsl\n"
16454cff 23711+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23712 " movl %%eax,%0\n"
bc901d79 23713- "7: rep; movsb\n"
16454cff 23714+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23715 "8:\n"
58c5fc13
MT
23716 ".section .fixup,\"ax\"\n"
23717 "9: lea 0(%%eax,%0,4),%0\n"
4c928ab7 23718@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
58c5fc13
MT
23719 */
23720 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23721 unsigned long size);
23722-unsigned long __copy_user_intel(void __user *to, const void *from,
23723+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23724+ unsigned long size);
23725+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23726 unsigned long size);
23727 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23728 const void __user *from, unsigned long size);
23729 #endif /* CONFIG_X86_INTEL_USERCOPY */
23730
23731 /* Generic arbitrary sized copy. */
23732-#define __copy_user(to, from, size) \
bc901d79
MT
23733+#define __copy_user(to, from, size, prefix, set, restore) \
23734 do { \
23735 int __d0, __d1, __d2; \
23736 __asm__ __volatile__( \
23737+ set \
23738 " cmp $7,%0\n" \
23739 " jbe 1f\n" \
23740 " movl %1,%0\n" \
23741 " negl %0\n" \
23742 " andl $7,%0\n" \
23743 " subl %0,%3\n" \
58c5fc13 23744- "4: rep; movsb\n" \
16454cff 23745+ "4: rep; "prefix"movsb\n" \
bc901d79
MT
23746 " movl %3,%0\n" \
23747 " shrl $2,%0\n" \
23748 " andl $3,%3\n" \
23749 " .align 2,0x90\n" \
58c5fc13 23750- "0: rep; movsl\n" \
16454cff 23751+ "0: rep; "prefix"movsl\n" \
bc901d79 23752 " movl %3,%0\n" \
58c5fc13 23753- "1: rep; movsb\n" \
16454cff 23754+ "1: rep; "prefix"movsb\n" \
bc901d79
MT
23755 "2:\n" \
23756+ restore \
23757 ".section .fixup,\"ax\"\n" \
23758 "5: addl %3,%0\n" \
23759 " jmp 2b\n" \
4c928ab7 23760@@ -682,14 +805,14 @@ do { \
bc901d79
MT
23761 " negl %0\n" \
23762 " andl $7,%0\n" \
23763 " subl %0,%3\n" \
58c5fc13 23764- "4: rep; movsb\n" \
16454cff 23765+ "4: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
23766 " movl %3,%0\n" \
23767 " shrl $2,%0\n" \
23768 " andl $3,%3\n" \
23769 " .align 2,0x90\n" \
58c5fc13 23770- "0: rep; movsl\n" \
16454cff 23771+ "0: rep; "__copyuser_seg"movsl\n" \
bc901d79 23772 " movl %3,%0\n" \
58c5fc13 23773- "1: rep; movsb\n" \
16454cff 23774+ "1: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
23775 "2:\n" \
23776 ".section .fixup,\"ax\"\n" \
23777 "5: addl %3,%0\n" \
4c928ab7 23778@@ -775,9 +898,9 @@ survive:
58c5fc13
MT
23779 }
23780 #endif
23781 if (movsl_is_ok(to, from, n))
23782- __copy_user(to, from, n);
bc901d79 23783+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
58c5fc13
MT
23784 else
23785- n = __copy_user_intel(to, from, n);
23786+ n = __generic_copy_to_user_intel(to, from, n);
23787 return n;
23788 }
23789 EXPORT_SYMBOL(__copy_to_user_ll);
4c928ab7 23790@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
58c5fc13
MT
23791 unsigned long n)
23792 {
23793 if (movsl_is_ok(to, from, n))
23794- __copy_user(to, from, n);
bc901d79 23795+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23796 else
23797- n = __copy_user_intel((void __user *)to,
23798- (const void *)from, n);
23799+ n = __generic_copy_from_user_intel(to, from, n);
23800 return n;
23801 }
23802 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
4c928ab7 23803@@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
58c5fc13
MT
23804 if (n > 64 && cpu_has_xmm2)
23805 n = __copy_user_intel_nocache(to, from, n);
23806 else
23807- __copy_user(to, from, n);
bc901d79 23808+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23809 #else
23810- __copy_user(to, from, n);
bc901d79 23811+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23812 #endif
23813 return n;
23814 }
23815 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23816
23817-/**
23818- * copy_to_user: - Copy a block of data into user space.
23819- * @to: Destination address, in user space.
23820- * @from: Source address, in kernel space.
23821- * @n: Number of bytes to copy.
23822- *
23823- * Context: User context only. This function may sleep.
23824- *
23825- * Copy data from kernel space to user space.
23826- *
23827- * Returns number of bytes that could not be copied.
23828- * On success, this will be zero.
23829- */
23830-unsigned long
23831-copy_to_user(void __user *to, const void *from, unsigned long n)
fe2de317 23832-{
58c5fc13
MT
23833- if (access_ok(VERIFY_WRITE, to, n))
23834- n = __copy_to_user(to, from, n);
23835- return n;
fe2de317 23836-}
58c5fc13 23837-EXPORT_SYMBOL(copy_to_user);
fe2de317 23838-
58c5fc13
MT
23839-/**
23840- * copy_from_user: - Copy a block of data from user space.
23841- * @to: Destination address, in kernel space.
23842- * @from: Source address, in user space.
23843- * @n: Number of bytes to copy.
23844- *
23845- * Context: User context only. This function may sleep.
23846- *
23847- * Copy data from user space to kernel space.
23848- *
23849- * Returns number of bytes that could not be copied.
23850- * On success, this will be zero.
23851- *
23852- * If some data could not be copied, this function will pad the copied
23853- * data to the requested size using zero bytes.
23854- */
23855-unsigned long
ae4e228f 23856-_copy_from_user(void *to, const void __user *from, unsigned long n)
fe2de317 23857-{
58c5fc13
MT
23858- if (access_ok(VERIFY_READ, from, n))
23859- n = __copy_from_user(to, from, n);
23860- else
23861- memset(to, 0, n);
23862- return n;
fe2de317 23863-}
ae4e228f 23864-EXPORT_SYMBOL(_copy_from_user);
fe2de317
MT
23865-
23866 void copy_from_user_overflow(void)
23867 {
23868 WARN(1, "Buffer overflow detected!\n");
23869 }
23870 EXPORT_SYMBOL(copy_from_user_overflow);
23871+
23872+void copy_to_user_overflow(void)
23873+{
23874+ WARN(1, "Buffer overflow detected!\n");
23875+}
ae4e228f 23876+EXPORT_SYMBOL(copy_to_user_overflow);
fe2de317 23877+
ae4e228f 23878+#ifdef CONFIG_PAX_MEMORY_UDEREF
bc901d79 23879+void __set_fs(mm_segment_t x)
fe2de317 23880+{
bc901d79
MT
23881+ switch (x.seg) {
23882+ case 0:
23883+ loadsegment(gs, 0);
23884+ break;
23885+ case TASK_SIZE_MAX:
23886+ loadsegment(gs, __USER_DS);
23887+ break;
23888+ case -1UL:
23889+ loadsegment(gs, __KERNEL_DS);
23890+ break;
23891+ default:
23892+ BUG();
23893+ }
23894+ return;
fe2de317 23895+}
71d190be 23896+EXPORT_SYMBOL(__set_fs);
ae4e228f
MT
23897+
23898+void set_fs(mm_segment_t x)
23899+{
58c5fc13 23900+ current_thread_info()->addr_limit = x;
bc901d79 23901+ __set_fs(x);
58c5fc13 23902+}
58c5fc13 23903+EXPORT_SYMBOL(set_fs);
bc901d79 23904+#endif
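The usercopy_32.c hunks above thread a segment-override prefix (__copyuser_seg) through every 32-bit copy primitive and, under CONFIG_PAX_MEMORY_UDEREF, reload %gs from set_fs()/__set_fs() so userland accesses go through a dedicated segment. The following is only an illustrative sketch of the resulting access pattern, not taken from the patch; the function name is made up:

	/* Sketch: with UDEREF the user-copy string instructions carry a %gs
	 * segment override (the __copyuser_seg prefix seen above), so a single
	 * byte fetch through a user pointer conceptually looks like this. */
	static inline unsigned char uderef_read_byte(const void *uaddr)
	{
		unsigned char c;

		/* the "gs" prefix confines the access to the user segment that
		 * __set_fs() programmed via loadsegment(gs, ...) */
		asm volatile("movb %%gs:(%1), %0" : "=q" (c) : "r" (uaddr));
		return c;
	}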
fe2de317
MT
23905diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23906index b7c2849..8633ad8 100644
23907--- a/arch/x86/lib/usercopy_64.c
23908+++ b/arch/x86/lib/usercopy_64.c
8308f9c9 23909@@ -42,6 +42,12 @@ long
df50ba0c
MT
23910 __strncpy_from_user(char *dst, const char __user *src, long count)
23911 {
23912 long res;
8308f9c9
MT
23913+
23914+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
23915+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23916+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
23917+#endif
23918+
df50ba0c
MT
23919 __do_strncpy_from_user(dst, src, count, res);
23920 return res;
23921 }
fe2de317 23922@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
df50ba0c
MT
23923 {
23924 long __d0;
23925 might_fault();
8308f9c9
MT
23926+
23927+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
23928+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23929+ addr += PAX_USER_SHADOW_BASE;
8308f9c9
MT
23930+#endif
23931+
df50ba0c
MT
23932 /* no memory constraint because it doesn't change any memory gcc knows
23933 about */
23934 asm volatile(
fe2de317
MT
23935@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23936 }
23937 EXPORT_SYMBOL(strlen_user);
df50ba0c 23938
fe2de317
MT
23939-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23940+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
df50ba0c
MT
23941 {
23942- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
6e9df6a3
MT
23943- return copy_user_generic((__force void *)to, (__force void *)from, len);
23944- }
23945- return len;
df50ba0c 23946+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
8308f9c9
MT
23947+
23948+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
23949+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23950+ to += PAX_USER_SHADOW_BASE;
23951+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23952+ from += PAX_USER_SHADOW_BASE;
8308f9c9
MT
23953+#endif
23954+
6e9df6a3 23955+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
df50ba0c
MT
23956+ }
23957+ return len;
23958 }
23959 EXPORT_SYMBOL(copy_in_user);
23960
6e9df6a3
MT
23961@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23962 * it is not necessary to optimize tail handling.
23963 */
23964 unsigned long
23965-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
fe2de317 23966+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
6e9df6a3
MT
23967 {
23968 char c;
23969 unsigned zero_len;
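On x86_64, UDEREF instead mirrors the user half of the address space at PAX_USER_SHADOW_BASE, and the usercopy_64.c hunks above rebase any user pointer below that base before touching it. The same three-line idiom recurs in __strncpy_from_user(), __clear_user() and copy_in_user(); factored into a hypothetical helper (the patch deliberately open-codes it at each call site), it would look roughly like:

	/* Sketch only: not part of the patch, which repeats this inline. */
	#ifdef CONFIG_PAX_MEMORY_UDEREF
	static inline void __user *pax_user_shadow(void __user *ptr)
	{
		if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
			ptr += PAX_USER_SHADOW_BASE;
		return ptr;
	}
	#else
	static inline void __user *pax_user_shadow(void __user *ptr)
	{
		return ptr;
	}
	#endif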
fe2de317 23970diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
5e856224 23971index 1fb85db..8b3540b 100644
fe2de317
MT
23972--- a/arch/x86/mm/extable.c
23973+++ b/arch/x86/mm/extable.c
23974@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
58c5fc13
MT
23975 const struct exception_table_entry *fixup;
23976
23977 #ifdef CONFIG_PNPBIOS
23978- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23979+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23980 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23981 extern u32 pnp_bios_is_utter_crap;
23982 pnp_bios_is_utter_crap = 1;
fe2de317 23983diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
5e856224 23984index f0b4caf..d92fd42 100644
fe2de317
MT
23985--- a/arch/x86/mm/fault.c
23986+++ b/arch/x86/mm/fault.c
6e9df6a3 23987@@ -13,11 +13,18 @@
ae4e228f 23988 #include <linux/perf_event.h> /* perf_sw_event */
bc901d79 23989 #include <linux/hugetlb.h> /* hstate_index_to_shift */
15a11c5b 23990 #include <linux/prefetch.h> /* prefetchw */
58c5fc13
MT
23991+#include <linux/unistd.h>
23992+#include <linux/compiler.h>
23993
23994 #include <asm/traps.h> /* dotraplinkage, ... */
23995 #include <asm/pgalloc.h> /* pgd_*(), ... */
23996 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
4c928ab7 23997 #include <asm/fixmap.h> /* VSYSCALL_START */
58c5fc13 23998+#include <asm/tlbflush.h>
df50ba0c
MT
23999+
24000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24001+#include <asm/stacktrace.h>
df50ba0c 24002+#endif
58c5fc13
MT
24003
24004 /*
24005 * Page fault error code bits:
fe2de317 24006@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
58c5fc13
MT
24007 int ret = 0;
24008
24009 /* kprobe_running() needs smp_processor_id() */
24010- if (kprobes_built_in() && !user_mode_vm(regs)) {
24011+ if (kprobes_built_in() && !user_mode(regs)) {
24012 preempt_disable();
24013 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24014 ret = 1;
fe2de317 24015@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
bc901d79
MT
24016 return !instr_lo || (instr_lo>>1) == 1;
24017 case 0x00:
24018 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24019- if (probe_kernel_address(instr, opcode))
24020+ if (user_mode(regs)) {
6e9df6a3 24021+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
24022+ return 0;
24023+ } else if (probe_kernel_address(instr, opcode))
24024 return 0;
24025
24026 *prefetch = (instr_lo == 0xF) &&
fe2de317 24027@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
bc901d79
MT
24028 while (instr < max_instr) {
24029 unsigned char opcode;
24030
24031- if (probe_kernel_address(instr, opcode))
24032+ if (user_mode(regs)) {
6e9df6a3 24033+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
24034+ break;
24035+ } else if (probe_kernel_address(instr, opcode))
24036 break;
24037
24038 instr++;
fe2de317 24039@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
58c5fc13
MT
24040 force_sig_info(si_signo, &info, tsk);
24041 }
24042
6e9df6a3
MT
24043+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24044+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24045+#endif
24046+
58c5fc13
MT
24047+#ifdef CONFIG_PAX_EMUTRAMP
24048+static int pax_handle_fetch_fault(struct pt_regs *regs);
24049+#endif
24050+
24051+#ifdef CONFIG_PAX_PAGEEXEC
24052+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24053+{
24054+ pgd_t *pgd;
24055+ pud_t *pud;
24056+ pmd_t *pmd;
24057+
24058+ pgd = pgd_offset(mm, address);
24059+ if (!pgd_present(*pgd))
24060+ return NULL;
24061+ pud = pud_offset(pgd, address);
24062+ if (!pud_present(*pud))
24063+ return NULL;
24064+ pmd = pmd_offset(pud, address);
24065+ if (!pmd_present(*pmd))
24066+ return NULL;
24067+ return pmd;
24068+}
24069+#endif
24070+
24071 DEFINE_SPINLOCK(pgd_lock);
24072 LIST_HEAD(pgd_list);
24073
6e9df6a3 24074@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
16454cff
MT
24075 for (address = VMALLOC_START & PMD_MASK;
24076 address >= TASK_SIZE && address < FIXADDR_TOP;
df50ba0c 24077 address += PMD_SIZE) {
df50ba0c
MT
24078+
24079+#ifdef CONFIG_PAX_PER_CPU_PGD
24080+ unsigned long cpu;
24081+#else
24082 struct page *page;
24083+#endif
24084
16454cff 24085 spin_lock(&pgd_lock);
df50ba0c
MT
24086+
24087+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 24088+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
df50ba0c 24089+ pgd_t *pgd = get_cpu_pgd(cpu);
bc901d79 24090+ pmd_t *ret;
df50ba0c
MT
24091+#else
24092 list_for_each_entry(page, &pgd_list, lru) {
df50ba0c 24093+ pgd_t *pgd = page_address(page);
bc901d79
MT
24094 spinlock_t *pgt_lock;
24095 pmd_t *ret;
24096
6e9df6a3 24097@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
bc901d79
MT
24098 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24099
24100 spin_lock(pgt_lock);
24101- ret = vmalloc_sync_one(page_address(page), address);
df50ba0c
MT
24102+#endif
24103+
bc901d79
MT
24104+ ret = vmalloc_sync_one(pgd, address);
24105+
24106+#ifndef CONFIG_PAX_PER_CPU_PGD
24107 spin_unlock(pgt_lock);
24108+#endif
24109
24110 if (!ret)
df50ba0c 24111 break;
fe2de317 24112@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
24113 * an interrupt in the middle of a task switch..
24114 */
24115 pgd_paddr = read_cr3();
24116+
24117+#ifdef CONFIG_PAX_PER_CPU_PGD
24118+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24119+#endif
24120+
24121 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24122 if (!pmd_k)
24123 return -1;
fe2de317 24124@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
24125 * happen within a race in page table update. In the later
24126 * case just flush:
24127 */
24128+
24129+#ifdef CONFIG_PAX_PER_CPU_PGD
24130+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24131+ pgd = pgd_offset_cpu(smp_processor_id(), address);
24132+#else
24133 pgd = pgd_offset(current->active_mm, address);
24134+#endif
24135+
24136 pgd_ref = pgd_offset_k(address);
24137 if (pgd_none(*pgd_ref))
24138 return -1;
4c928ab7 24139@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
24140 static int is_errata100(struct pt_regs *regs, unsigned long address)
24141 {
24142 #ifdef CONFIG_X86_64
24143- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24144+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24145 return 1;
24146 #endif
24147 return 0;
4c928ab7 24148@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
24149 }
24150
24151 static const char nx_warning[] = KERN_CRIT
24152-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24153+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24154
24155 static void
24156 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
4c928ab7 24157@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
58c5fc13
MT
24158 if (!oops_may_print())
24159 return;
24160
24161- if (error_code & PF_INSTR) {
ae4e228f 24162+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
58c5fc13
MT
24163 unsigned int level;
24164
24165 pte_t *pte = lookup_address(address, &level);
24166
24167 if (pte && pte_present(*pte) && !pte_exec(*pte))
24168- printk(nx_warning, current_uid());
24169+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
fe2de317
MT
24170 }
24171
58c5fc13 24172+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 24173+ if (init_mm.start_code <= address && address < init_mm.end_code) {
58c5fc13 24174+ if (current->signal->curr_ip)
ae4e228f
MT
24175+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24176+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
58c5fc13
MT
24177+ else
24178+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24179+ current->comm, task_pid_nr(current), current_uid(), current_euid());
fe2de317 24180+ }
58c5fc13 24181+#endif
fe2de317 24182+
58c5fc13
MT
24183 printk(KERN_ALERT "BUG: unable to handle kernel ");
24184 if (address < PAGE_SIZE)
fe2de317 24185 printk(KERN_CONT "NULL pointer dereference");
5e856224 24186@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
6e9df6a3
MT
24187 }
24188 #endif
24189
58c5fc13 24190+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 24191+ if (pax_is_fetch_fault(regs, error_code, address)) {
58c5fc13
MT
24192+
24193+#ifdef CONFIG_PAX_EMUTRAMP
24194+ switch (pax_handle_fetch_fault(regs)) {
24195+ case 2:
24196+ return;
24197+ }
24198+#endif
24199+
6e9df6a3 24200+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
58c5fc13
MT
24201+ do_group_exit(SIGKILL);
24202+ }
24203+#endif
24204+
6e9df6a3
MT
24205 if (unlikely(show_unhandled_signals))
24206 show_signal_msg(regs, error_code, address, tsk);
58c5fc13 24207
5e856224 24208@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
6e9df6a3
MT
24209 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
24210 printk(KERN_ERR
24211 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24212- tsk->comm, tsk->pid, address);
24213+ tsk->comm, task_pid_nr(tsk), address);
24214 code = BUS_MCEERR_AR;
24215 }
24216 #endif
5e856224 24217@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
58c5fc13
MT
24218 return 1;
24219 }
24220
24221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24222+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24223+{
24224+ pte_t *pte;
24225+ pmd_t *pmd;
24226+ spinlock_t *ptl;
24227+ unsigned char pte_mask;
24228+
ae4e228f 24229+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
58c5fc13
MT
24230+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24231+ return 0;
24232+
24233+ /* PaX: it's our fault, let's handle it if we can */
24234+
24235+ /* PaX: take a look at read faults before acquiring any locks */
24236+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24237+ /* instruction fetch attempt from a protected page in user mode */
24238+ up_read(&mm->mmap_sem);
24239+
24240+#ifdef CONFIG_PAX_EMUTRAMP
24241+ switch (pax_handle_fetch_fault(regs)) {
24242+ case 2:
24243+ return 1;
24244+ }
24245+#endif
24246+
24247+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24248+ do_group_exit(SIGKILL);
24249+ }
24250+
24251+ pmd = pax_get_pmd(mm, address);
24252+ if (unlikely(!pmd))
24253+ return 0;
24254+
24255+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24256+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24257+ pte_unmap_unlock(pte, ptl);
24258+ return 0;
24259+ }
24260+
24261+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24262+ /* write attempt to a protected page in user mode */
24263+ pte_unmap_unlock(pte, ptl);
24264+ return 0;
24265+ }
24266+
24267+#ifdef CONFIG_SMP
24268+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24269+#else
24270+ if (likely(address > get_limit(regs->cs)))
24271+#endif
24272+ {
24273+ set_pte(pte, pte_mkread(*pte));
24274+ __flush_tlb_one(address);
24275+ pte_unmap_unlock(pte, ptl);
24276+ up_read(&mm->mmap_sem);
24277+ return 1;
24278+ }
24279+
24280+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24281+
24282+ /*
24283+ * PaX: fill DTLB with user rights and retry
24284+ */
24285+ __asm__ __volatile__ (
58c5fc13
MT
24286+ "orb %2,(%1)\n"
24287+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24288+/*
24289+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24290+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24291+ * page fault when examined during a TLB load attempt. this is true not only
24292+ * for PTEs holding a non-present entry but also present entries that will
24293+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24294+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24295+ * for our target pages since their PTEs are simply not in the TLBs at all.
24296+
24297+ * the best thing in omitting it is that we gain around 15-20% speed in the
24298+ * fast path of the page fault handler and can get rid of tracing since we
24299+ * can no longer flush unintended entries.
24300+ */
24301+ "invlpg (%0)\n"
24302+#endif
16454cff 24303+ __copyuser_seg"testb $0,(%0)\n"
58c5fc13 24304+ "xorb %3,(%1)\n"
58c5fc13 24305+ :
bc901d79 24306+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
58c5fc13
MT
24307+ : "memory", "cc");
24308+ pte_unmap_unlock(pte, ptl);
24309+ up_read(&mm->mmap_sem);
24310+ return 1;
24311+}
24312+#endif
24313+
24314 /*
24315 * Handle a spurious fault caused by a stale TLB entry.
24316 *
5e856224 24317@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
58c5fc13 24318 static inline int
bc901d79 24319 access_error(unsigned long error_code, struct vm_area_struct *vma)
58c5fc13 24320 {
ae4e228f 24321+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
58c5fc13
MT
24322+ return 1;
24323+
bc901d79 24324 if (error_code & PF_WRITE) {
58c5fc13
MT
24325 /* write, present and write, not present: */
24326 if (unlikely(!(vma->vm_flags & VM_WRITE)))
5e856224 24327@@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
24328 {
24329 struct vm_area_struct *vma;
24330 struct task_struct *tsk;
24331- unsigned long address;
24332 struct mm_struct *mm;
58c5fc13 24333 int fault;
bc901d79 24334 int write = error_code & PF_WRITE;
15a11c5b 24335 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
bc901d79 24336 (write ? FAULT_FLAG_WRITE : 0);
58c5fc13 24337
fe2de317
MT
24338- tsk = current;
24339- mm = tsk->mm;
24340-
24341 /* Get the faulting address: */
24342- address = read_cr2();
df50ba0c
MT
24343+ unsigned long address = read_cr2();
24344+
24345+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24346+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24347+ if (!search_exception_tables(regs->ip)) {
24348+ bad_area_nosemaphore(regs, error_code, address);
24349+ return;
24350+ }
24351+ if (address < PAX_USER_SHADOW_BASE) {
24352+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
6e9df6a3 24353+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
66a7e928 24354+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
df50ba0c
MT
24355+ } else
24356+ address -= PAX_USER_SHADOW_BASE;
24357+ }
24358+#endif
58c5fc13 24359+
fe2de317
MT
24360+ tsk = current;
24361+ mm = tsk->mm;
58c5fc13 24362
58c5fc13
MT
24363 /*
24364 * Detect and handle instructions that would cause a page fault for
5e856224 24365@@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
24366 * User-mode registers count as a user access even for any
24367 * potential system fault or CPU buglet:
24368 */
24369- if (user_mode_vm(regs)) {
24370+ if (user_mode(regs)) {
24371 local_irq_enable();
24372 error_code |= PF_USER;
24373 } else {
5e856224 24374@@ -1132,6 +1338,11 @@ retry:
58c5fc13
MT
24375 might_sleep();
24376 }
24377
24378+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24379+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24380+ return;
24381+#endif
24382+
24383 vma = find_vma(mm, address);
24384 if (unlikely(!vma)) {
24385 bad_area(regs, error_code, address);
5e856224 24386@@ -1143,18 +1354,24 @@ retry:
58c5fc13
MT
24387 bad_area(regs, error_code, address);
24388 return;
24389 }
24390- if (error_code & PF_USER) {
24391- /*
24392- * Accessing the stack below %sp is always a bug.
24393- * The large cushion allows instructions like enter
24394- * and pusha to work. ("enter $65535, $31" pushes
24395- * 32 pointers and then decrements %sp by 65535.)
24396- */
24397- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24398- bad_area(regs, error_code, address);
24399- return;
24400- }
24401+ /*
24402+ * Accessing the stack below %sp is always a bug.
24403+ * The large cushion allows instructions like enter
24404+ * and pusha to work. ("enter $65535, $31" pushes
24405+ * 32 pointers and then decrements %sp by 65535.)
24406+ */
24407+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24408+ bad_area(regs, error_code, address);
24409+ return;
df50ba0c 24410 }
58c5fc13
MT
24411+
24412+#ifdef CONFIG_PAX_SEGMEXEC
24413+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24414+ bad_area(regs, error_code, address);
24415+ return;
df50ba0c 24416+ }
58c5fc13
MT
24417+#endif
24418+
24419 if (unlikely(expand_stack(vma, address))) {
24420 bad_area(regs, error_code, address);
24421 return;
5e856224 24422@@ -1209,3 +1426,292 @@ good_area:
58c5fc13
MT
24423
24424 up_read(&mm->mmap_sem);
24425 }
24426+
6e9df6a3
MT
24427+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24428+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24429+{
24430+ struct mm_struct *mm = current->mm;
24431+ unsigned long ip = regs->ip;
24432+
24433+ if (v8086_mode(regs))
24434+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24435+
24436+#ifdef CONFIG_PAX_PAGEEXEC
24437+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24438+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24439+ return true;
24440+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24441+ return true;
24442+ return false;
24443+ }
24444+#endif
24445+
24446+#ifdef CONFIG_PAX_SEGMEXEC
24447+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24448+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24449+ return true;
24450+ return false;
24451+ }
24452+#endif
24453+
24454+ return false;
24455+}
24456+#endif
24457+
58c5fc13
MT
24458+#ifdef CONFIG_PAX_EMUTRAMP
24459+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24460+{
24461+ int err;
24462+
4c928ab7
MT
24463+ do { /* PaX: libffi trampoline emulation */
24464+ unsigned char mov, jmp;
24465+ unsigned int addr1, addr2;
24466+
24467+#ifdef CONFIG_X86_64
24468+ if ((regs->ip + 9) >> 32)
24469+ break;
24470+#endif
24471+
24472+ err = get_user(mov, (unsigned char __user *)regs->ip);
24473+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24474+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24475+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24476+
24477+ if (err)
24478+ break;
24479+
24480+ if (mov == 0xB8 && jmp == 0xE9) {
24481+ regs->ax = addr1;
24482+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24483+ return 2;
24484+ }
24485+ } while (0);
24486+
58c5fc13
MT
24487+ do { /* PaX: gcc trampoline emulation #1 */
24488+ unsigned char mov1, mov2;
24489+ unsigned short jmp;
24490+ unsigned int addr1, addr2;
24491+
24492+#ifdef CONFIG_X86_64
24493+ if ((regs->ip + 11) >> 32)
24494+ break;
24495+#endif
24496+
24497+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24498+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24499+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24500+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24501+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24502+
24503+ if (err)
24504+ break;
24505+
24506+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24507+ regs->cx = addr1;
24508+ regs->ax = addr2;
24509+ regs->ip = addr2;
24510+ return 2;
24511+ }
24512+ } while (0);
24513+
24514+ do { /* PaX: gcc trampoline emulation #2 */
24515+ unsigned char mov, jmp;
24516+ unsigned int addr1, addr2;
24517+
24518+#ifdef CONFIG_X86_64
24519+ if ((regs->ip + 9) >> 32)
24520+ break;
24521+#endif
24522+
24523+ err = get_user(mov, (unsigned char __user *)regs->ip);
24524+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24525+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24526+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24527+
24528+ if (err)
24529+ break;
24530+
24531+ if (mov == 0xB9 && jmp == 0xE9) {
24532+ regs->cx = addr1;
24533+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24534+ return 2;
24535+ }
24536+ } while (0);
24537+
24538+ return 1; /* PaX in action */
24539+}
24540+
24541+#ifdef CONFIG_X86_64
24542+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24543+{
24544+ int err;
24545+
4c928ab7
MT
24546+ do { /* PaX: libffi trampoline emulation */
24547+ unsigned short mov1, mov2, jmp1;
24548+ unsigned char stcclc, jmp2;
24549+ unsigned long addr1, addr2;
24550+
24551+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24552+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24553+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24554+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24555+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24556+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24557+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24558+
24559+ if (err)
24560+ break;
24561+
24562+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24563+ regs->r11 = addr1;
24564+ regs->r10 = addr2;
24565+ if (stcclc == 0xF8)
24566+ regs->flags &= ~X86_EFLAGS_CF;
24567+ else
24568+ regs->flags |= X86_EFLAGS_CF;
24569+ regs->ip = addr1;
24570+ return 2;
24571+ }
24572+ } while (0);
24573+
58c5fc13
MT
24574+ do { /* PaX: gcc trampoline emulation #1 */
24575+ unsigned short mov1, mov2, jmp1;
24576+ unsigned char jmp2;
24577+ unsigned int addr1;
24578+ unsigned long addr2;
24579+
24580+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24581+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24582+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24583+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24584+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24585+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24586+
24587+ if (err)
24588+ break;
24589+
24590+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24591+ regs->r11 = addr1;
24592+ regs->r10 = addr2;
24593+ regs->ip = addr1;
24594+ return 2;
24595+ }
24596+ } while (0);
24597+
24598+ do { /* PaX: gcc trampoline emulation #2 */
24599+ unsigned short mov1, mov2, jmp1;
24600+ unsigned char jmp2;
24601+ unsigned long addr1, addr2;
24602+
24603+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24604+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24605+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24606+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24607+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24608+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24609+
24610+ if (err)
24611+ break;
24612+
24613+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24614+ regs->r11 = addr1;
24615+ regs->r10 = addr2;
24616+ regs->ip = addr1;
24617+ return 2;
24618+ }
24619+ } while (0);
24620+
24621+ return 1; /* PaX in action */
24622+}
24623+#endif
24624+
24625+/*
24626+ * PaX: decide what to do with offenders (regs->ip = fault address)
24627+ *
24628+ * returns 1 when task should be killed
24629+ * 2 when gcc trampoline was detected
24630+ */
24631+static int pax_handle_fetch_fault(struct pt_regs *regs)
24632+{
24633+ if (v8086_mode(regs))
24634+ return 1;
24635+
24636+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24637+ return 1;
24638+
24639+#ifdef CONFIG_X86_32
24640+ return pax_handle_fetch_fault_32(regs);
24641+#else
24642+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24643+ return pax_handle_fetch_fault_32(regs);
24644+ else
24645+ return pax_handle_fetch_fault_64(regs);
24646+#endif
24647+}
24648+#endif
24649+
24650+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 24651+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
24652+{
24653+ long i;
24654+
24655+ printk(KERN_ERR "PAX: bytes at PC: ");
24656+ for (i = 0; i < 20; i++) {
24657+ unsigned char c;
6e9df6a3 24658+ if (get_user(c, (unsigned char __force_user *)pc+i))
58c5fc13
MT
24659+ printk(KERN_CONT "?? ");
24660+ else
24661+ printk(KERN_CONT "%02x ", c);
24662+ }
24663+ printk("\n");
24664+
24665+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
ae4e228f 24666+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
58c5fc13 24667+ unsigned long c;
6e9df6a3 24668+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
58c5fc13
MT
24669+#ifdef CONFIG_X86_32
24670+ printk(KERN_CONT "???????? ");
24671+#else
6e9df6a3
MT
24672+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24673+ printk(KERN_CONT "???????? ???????? ");
24674+ else
24675+ printk(KERN_CONT "???????????????? ");
58c5fc13 24676+#endif
6e9df6a3
MT
24677+ } else {
24678+#ifdef CONFIG_X86_64
24679+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24680+ printk(KERN_CONT "%08x ", (unsigned int)c);
24681+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24682+ } else
24683+#endif
24684+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24685+ }
58c5fc13
MT
24686+ }
24687+ printk("\n");
24688+}
24689+#endif
58c5fc13 24690+
ae4e228f
MT
24691+/**
24692+ * probe_kernel_write(): safely attempt to write to a location
24693+ * @dst: address to write to
24694+ * @src: pointer to the data that shall be written
24695+ * @size: size of the data chunk
24696+ *
24697+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24698+ * happens, handle that and return -EFAULT.
24699+ */
24700+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24701+{
24702+ long ret;
24703+ mm_segment_t old_fs = get_fs();
24704+
24705+ set_fs(KERNEL_DS);
24706+ pagefault_disable();
24707+ pax_open_kernel();
6e9df6a3 24708+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
ae4e228f
MT
24709+ pax_close_kernel();
24710+ pagefault_enable();
24711+ set_fs(old_fs);
24712+
24713+ return ret ? -EFAULT : 0;
24714+}
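The probe_kernel_write() added at the end of the fault.c hunk switches to KERNEL_DS, disables page faults and opens the KERNEXEC window, so a faulting or write-protected destination yields -EFAULT instead of an oops. A usage sketch, with a hypothetical target address, assuming the exported signature shown above:

	/* Sketch only: patch one byte of (normally read-only) kernel text. */
	static void example_patch_nop(void *kernel_text_addr)
	{
		static const unsigned char nop = 0x90;	/* single-byte NOP */

		if (probe_kernel_write(kernel_text_addr, &nop, sizeof(nop)))
			printk(KERN_ERR "example: target not writable or not mapped\n");
	}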
fe2de317 24715diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
4c928ab7 24716index dd74e46..7d26398 100644
fe2de317
MT
24717--- a/arch/x86/mm/gup.c
24718+++ b/arch/x86/mm/gup.c
4c928ab7 24719@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
ae4e228f
MT
24720 addr = start;
24721 len = (unsigned long) nr_pages << PAGE_SHIFT;
24722 end = start + len;
24723- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24724+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24725 (void __user *)start, len)))
24726 return 0;
58c5fc13 24727
fe2de317
MT
24728diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24729index f4f29b1..5cac4fb 100644
24730--- a/arch/x86/mm/highmem_32.c
24731+++ b/arch/x86/mm/highmem_32.c
24732@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
58c5fc13
MT
24733 idx = type + KM_TYPE_NR*smp_processor_id();
24734 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24735 BUG_ON(!pte_none(*(kmap_pte-idx)));
24736+
ae4e228f 24737+ pax_open_kernel();
58c5fc13 24738 set_pte(kmap_pte-idx, mk_pte(page, prot));
ae4e228f 24739+ pax_close_kernel();
fe2de317
MT
24740+
24741 arch_flush_lazy_mmu_mode();
58c5fc13 24742
58c5fc13 24743 return (void *)vaddr;
fe2de317 24744diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
5e856224 24745index 8ecbb4b..a269cab 100644
fe2de317
MT
24746--- a/arch/x86/mm/hugetlbpage.c
24747+++ b/arch/x86/mm/hugetlbpage.c
24748@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
58c5fc13
MT
24749 struct hstate *h = hstate_file(file);
24750 struct mm_struct *mm = current->mm;
24751 struct vm_area_struct *vma;
24752- unsigned long start_addr;
24753+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24754+
24755+#ifdef CONFIG_PAX_SEGMEXEC
24756+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24757+ pax_task_size = SEGMEXEC_TASK_SIZE;
24758+#endif
6892158b
MT
24759+
24760+ pax_task_size -= PAGE_SIZE;
58c5fc13
MT
24761
24762 if (len > mm->cached_hole_size) {
24763- start_addr = mm->free_area_cache;
24764+ start_addr = mm->free_area_cache;
24765 } else {
24766- start_addr = TASK_UNMAPPED_BASE;
24767- mm->cached_hole_size = 0;
24768+ start_addr = mm->mmap_base;
24769+ mm->cached_hole_size = 0;
24770 }
24771
24772 full_search:
6892158b 24773@@ -280,26 +287,27 @@ full_search:
58c5fc13
MT
24774
24775 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24776 /* At this point: (!vma || addr < vma->vm_end). */
24777- if (TASK_SIZE - len < addr) {
24778+ if (pax_task_size - len < addr) {
24779 /*
24780 * Start a new search - just in case we missed
24781 * some holes.
24782 */
24783- if (start_addr != TASK_UNMAPPED_BASE) {
24784- start_addr = TASK_UNMAPPED_BASE;
24785+ if (start_addr != mm->mmap_base) {
24786+ start_addr = mm->mmap_base;
24787 mm->cached_hole_size = 0;
24788 goto full_search;
24789 }
57199397
MT
24790 return -ENOMEM;
24791 }
24792- if (!vma || addr + len <= vma->vm_start) {
24793- mm->free_area_cache = addr + len;
24794- return addr;
24795- }
24796+ if (check_heap_stack_gap(vma, addr, len))
24797+ break;
24798 if (addr + mm->cached_hole_size < vma->vm_start)
24799 mm->cached_hole_size = vma->vm_start - addr;
24800 addr = ALIGN(vma->vm_end, huge_page_size(h));
24801 }
24802+
24803+ mm->free_area_cache = addr + len;
24804+ return addr;
24805 }
24806
24807 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
fe2de317 24808@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
57199397 24809 {
58c5fc13
MT
24810 struct hstate *h = hstate_file(file);
24811 struct mm_struct *mm = current->mm;
57199397 24812- struct vm_area_struct *vma, *prev_vma;
58c5fc13 24813- unsigned long base = mm->mmap_base, addr = addr0;
57199397 24814+ struct vm_area_struct *vma;
58c5fc13
MT
24815+ unsigned long base = mm->mmap_base, addr;
24816 unsigned long largest_hole = mm->cached_hole_size;
24817- int first_time = 1;
24818
24819 /* don't allow allocations above current base */
24820 if (mm->free_area_cache > base)
5e856224 24821@@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
58c5fc13
MT
24822 largest_hole = 0;
24823 mm->free_area_cache = base;
24824 }
24825-try_again:
24826+
24827 /* make sure it can fit in the remaining address space */
24828 if (mm->free_area_cache < len)
24829 goto fail;
16454cff 24830
66a7e928 24831 /* either no address requested or can't fit in requested address hole */
16454cff
MT
24832- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24833+ addr = (mm->free_area_cache - len);
57199397 24834 do {
16454cff 24835+ addr &= huge_page_mask(h);
57199397
MT
24836 /*
24837 * Lookup failure means no vma is above this address,
24838 * i.e. return with success:
5e856224 24839@@ -341,46 +349,47 @@ try_again:
57199397
MT
24840 * new region fits between prev_vma->vm_end and
24841 * vma->vm_start, use it:
24842 */
5e856224 24843- prev_vma = vma->vm_prev;
57199397
MT
24844- if (addr + len <= vma->vm_start &&
24845- (!prev_vma || (addr >= prev_vma->vm_end))) {
24846+ if (check_heap_stack_gap(vma, addr, len)) {
24847 /* remember the address as a hint for next time */
24848- mm->cached_hole_size = largest_hole;
24849- return (mm->free_area_cache = addr);
24850- } else {
24851- /* pull free_area_cache down to the first hole */
24852- if (mm->free_area_cache == vma->vm_end) {
24853- mm->free_area_cache = vma->vm_start;
24854- mm->cached_hole_size = largest_hole;
24855- }
24856+ mm->cached_hole_size = largest_hole;
24857+ return (mm->free_area_cache = addr);
24858+ }
24859+ /* pull free_area_cache down to the first hole */
24860+ if (mm->free_area_cache == vma->vm_end) {
24861+ mm->free_area_cache = vma->vm_start;
24862+ mm->cached_hole_size = largest_hole;
24863 }
24864
24865 /* remember the largest hole we saw so far */
24866 if (addr + largest_hole < vma->vm_start)
24867- largest_hole = vma->vm_start - addr;
24868+ largest_hole = vma->vm_start - addr;
24869
24870 /* try just below the current vma->vm_start */
16454cff
MT
24871- addr = (vma->vm_start - len) & huge_page_mask(h);
24872- } while (len <= vma->vm_start);
24873+ addr = skip_heap_stack_gap(vma, len);
24874+ } while (!IS_ERR_VALUE(addr));
58c5fc13
MT
24875
24876 fail:
24877 /*
24878- * if hint left us with no space for the requested
24879- * mapping then try again:
24880- */
24881- if (first_time) {
24882- mm->free_area_cache = base;
24883- largest_hole = 0;
24884- first_time = 0;
24885- goto try_again;
24886- }
24887- /*
24888 * A failed mmap() very likely causes application failure,
24889 * so fall back to the bottom-up function here. This scenario
24890 * can happen with large stack limits and large mmap()
24891 * allocations.
24892 */
24893- mm->free_area_cache = TASK_UNMAPPED_BASE;
24894+
24895+#ifdef CONFIG_PAX_SEGMEXEC
24896+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24897+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24898+ else
24899+#endif
24900+
24901+ mm->mmap_base = TASK_UNMAPPED_BASE;
24902+
24903+#ifdef CONFIG_PAX_RANDMMAP
24904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24905+ mm->mmap_base += mm->delta_mmap;
24906+#endif
24907+
24908+ mm->free_area_cache = mm->mmap_base;
24909 mm->cached_hole_size = ~0UL;
24910 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24911 len, pgoff, flags);
5e856224 24912@@ -388,6 +397,7 @@ fail:
58c5fc13
MT
24913 /*
24914 * Restore the topdown base:
24915 */
24916+ mm->mmap_base = base;
24917 mm->free_area_cache = base;
24918 mm->cached_hole_size = ~0UL;
24919
5e856224 24920@@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
24921 struct hstate *h = hstate_file(file);
24922 struct mm_struct *mm = current->mm;
24923 struct vm_area_struct *vma;
24924+ unsigned long pax_task_size = TASK_SIZE;
24925
24926 if (len & ~huge_page_mask(h))
24927 return -EINVAL;
24928- if (len > TASK_SIZE)
24929+
24930+#ifdef CONFIG_PAX_SEGMEXEC
24931+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24932+ pax_task_size = SEGMEXEC_TASK_SIZE;
24933+#endif
24934+
6892158b
MT
24935+ pax_task_size -= PAGE_SIZE;
24936+
58c5fc13
MT
24937+ if (len > pax_task_size)
24938 return -ENOMEM;
24939
24940 if (flags & MAP_FIXED) {
5e856224 24941@@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
24942 if (addr) {
24943 addr = ALIGN(addr, huge_page_size(h));
24944 vma = find_vma(mm, addr);
24945- if (TASK_SIZE - len >= addr &&
57199397
MT
24946- (!vma || addr + len <= vma->vm_start))
24947+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
58c5fc13
MT
24948 return addr;
24949 }
57199397 24950 if (mm->get_unmapped_area == arch_get_unmapped_area)
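The hugetlbpage.c hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap() and make both search directions honour the SEGMEXEC task size and the randomized mmap base. The helper itself is defined elsewhere in the patch; a rough sketch of the semantics the call sites rely on, with the stack-gap clause and gap size as assumptions:

	/* Sketch of what the call sites above depend on; the guard-gap clause
	 * below is an assumption for illustration only. */
	static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
					       unsigned long addr, unsigned long len)
	{
		if (!vma)
			return 1;		/* nothing above: the range fits */
		if (addr + len > vma->vm_start)
			return 0;		/* would overlap the next vma */
		if ((vma->vm_flags & VM_GROWSDOWN) &&
		    vma->vm_start - (addr + len) < PAGE_SIZE)
			return 0;		/* assumed guard gap below a stack */
		return 1;
	}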
fe2de317 24951diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
5e856224 24952index 6cabf65..77e9c1c 100644
fe2de317
MT
24953--- a/arch/x86/mm/init.c
24954+++ b/arch/x86/mm/init.c
5e856224 24955@@ -17,6 +17,7 @@
4c928ab7
MT
24956 #include <asm/tlb.h>
24957 #include <asm/proto.h>
5e856224 24958 #include <asm/dma.h> /* for MAX_DMA_PFN */
4c928ab7
MT
24959+#include <asm/desc.h>
24960
24961 unsigned long __initdata pgt_buf_start;
24962 unsigned long __meminitdata pgt_buf_end;
5e856224 24963@@ -33,7 +34,7 @@ int direct_gbpages
fe2de317
MT
24964 static void __init find_early_table_space(unsigned long end, int use_pse,
24965 int use_gbpages)
24966 {
24967- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24968+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24969 phys_addr_t base;
24970
24971 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
5e856224 24972@@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
fe2de317
MT
24973 */
24974 int devmem_is_allowed(unsigned long pagenr)
24975 {
24976+#ifdef CONFIG_GRKERNSEC_KMEM
24977+ /* allow BDA */
24978+ if (!pagenr)
24979+ return 1;
24980+ /* allow EBDA */
24981+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24982+ return 1;
24983+#else
24984+ if (!pagenr)
24985+ return 1;
24986+#ifdef CONFIG_VM86
24987+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24988+ return 1;
24989+#endif
24990+#endif
24991+
24992+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24993+ return 1;
24994+#ifdef CONFIG_GRKERNSEC_KMEM
24995+ /* throw out everything else below 1MB */
24996 if (pagenr <= 256)
24997- return 1;
24998+ return 0;
24999+#endif
25000 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25001 return 0;
25002 if (!page_is_ram(pagenr))
5e856224 25003@@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
fe2de317
MT
25004
25005 void free_initmem(void)
25006 {
25007+
25008+#ifdef CONFIG_PAX_KERNEXEC
25009+#ifdef CONFIG_X86_32
25010+ /* PaX: limit KERNEL_CS to actual size */
25011+ unsigned long addr, limit;
25012+ struct desc_struct d;
25013+ int cpu;
25014+
25015+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25016+ limit = (limit - 1UL) >> PAGE_SHIFT;
25017+
25018+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
4c928ab7 25019+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
fe2de317
MT
25020+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25021+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25022+ }
25023+
25024+ /* PaX: make KERNEL_CS read-only */
25025+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25026+ if (!paravirt_enabled())
25027+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25028+/*
25029+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25030+ pgd = pgd_offset_k(addr);
25031+ pud = pud_offset(pgd, addr);
25032+ pmd = pmd_offset(pud, addr);
25033+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25034+ }
25035+*/
25036+#ifdef CONFIG_X86_PAE
25037+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25038+/*
25039+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25040+ pgd = pgd_offset_k(addr);
25041+ pud = pud_offset(pgd, addr);
25042+ pmd = pmd_offset(pud, addr);
25043+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25044+ }
25045+*/
25046+#endif
25047+
25048+#ifdef CONFIG_MODULES
25049+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25050+#endif
25051+
25052+#else
25053+ pgd_t *pgd;
25054+ pud_t *pud;
25055+ pmd_t *pmd;
25056+ unsigned long addr, end;
25057+
25058+ /* PaX: make kernel code/rodata read-only, rest non-executable */
25059+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25060+ pgd = pgd_offset_k(addr);
25061+ pud = pud_offset(pgd, addr);
25062+ pmd = pmd_offset(pud, addr);
25063+ if (!pmd_present(*pmd))
25064+ continue;
25065+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25066+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25067+ else
25068+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25069+ }
25070+
25071+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25072+ end = addr + KERNEL_IMAGE_SIZE;
25073+ for (; addr < end; addr += PMD_SIZE) {
25074+ pgd = pgd_offset_k(addr);
25075+ pud = pud_offset(pgd, addr);
25076+ pmd = pmd_offset(pud, addr);
25077+ if (!pmd_present(*pmd))
25078+ continue;
25079+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25080+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25081+ }
25082+#endif
25083+
25084+ flush_tlb_all();
25085+#endif
25086+
25087 free_init_pages("unused kernel memory",
25088 (unsigned long)(&__init_begin),
25089 (unsigned long)(&__init_end));
25090diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
5e856224 25091index 8663f6c..829ae76 100644
fe2de317
MT
25092--- a/arch/x86/mm/init_32.c
25093+++ b/arch/x86/mm/init_32.c
16454cff 25094@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
58c5fc13
MT
25095 }
25096
25097 /*
25098- * Creates a middle page table and puts a pointer to it in the
25099- * given global directory entry. This only returns the gd entry
25100- * in non-PAE compilation mode, since the middle layer is folded.
25101- */
25102-static pmd_t * __init one_md_table_init(pgd_t *pgd)
25103-{
25104- pud_t *pud;
25105- pmd_t *pmd_table;
25106-
25107-#ifdef CONFIG_X86_PAE
25108- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25109- if (after_bootmem)
ae4e228f 25110- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
58c5fc13
MT
25111- else
25112- pmd_table = (pmd_t *)alloc_low_page();
25113- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25114- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25115- pud = pud_offset(pgd, 0);
25116- BUG_ON(pmd_table != pmd_offset(pud, 0));
25117-
25118- return pmd_table;
25119- }
25120-#endif
25121- pud = pud_offset(pgd, 0);
25122- pmd_table = pmd_offset(pud, 0);
25123-
25124- return pmd_table;
25125-}
25126-
25127-/*
25128 * Create a page table and place a pointer to it in a middle page
25129 * directory entry:
25130 */
fe2de317 25131@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
58c5fc13
MT
25132 page_table = (pte_t *)alloc_low_page();
25133
25134 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25135+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25136+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25137+#else
25138 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25139+#endif
25140 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25141 }
25142
25143 return pte_offset_kernel(pmd, 0);
25144 }
25145
25146+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25147+{
25148+ pud_t *pud;
25149+ pmd_t *pmd_table;
25150+
25151+ pud = pud_offset(pgd, 0);
25152+ pmd_table = pmd_offset(pud, 0);
25153+
25154+ return pmd_table;
25155+}
25156+
25157 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25158 {
25159 int pgd_idx = pgd_index(vaddr);
fe2de317 25160@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
25161 int pgd_idx, pmd_idx;
25162 unsigned long vaddr;
25163 pgd_t *pgd;
25164+ pud_t *pud;
25165 pmd_t *pmd;
25166 pte_t *pte = NULL;
25167
fe2de317 25168@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
25169 pgd = pgd_base + pgd_idx;
25170
25171 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25172- pmd = one_md_table_init(pgd);
25173- pmd = pmd + pmd_index(vaddr);
25174+ pud = pud_offset(pgd, vaddr);
25175+ pmd = pmd_offset(pud, vaddr);
25176+
25177+#ifdef CONFIG_X86_PAE
25178+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25179+#endif
25180+
25181 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25182 pmd++, pmd_idx++) {
25183 pte = page_table_kmap_check(one_page_table_init(pmd),
fe2de317 25184@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
25185 }
25186 }
25187
25188-static inline int is_kernel_text(unsigned long addr)
25189+static inline int is_kernel_text(unsigned long start, unsigned long end)
25190 {
16454cff 25191- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
58c5fc13
MT
25192- return 1;
25193- return 0;
ae4e228f 25194+ if ((start > ktla_ktva((unsigned long)_etext) ||
58c5fc13
MT
25195+ end <= ktla_ktva((unsigned long)_stext)) &&
25196+ (start > ktla_ktva((unsigned long)_einittext) ||
25197+ end <= ktla_ktva((unsigned long)_sinittext)) &&
ae4e228f
MT
25198+
25199+#ifdef CONFIG_ACPI_SLEEP
25200+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25201+#endif
25202+
58c5fc13
MT
25203+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25204+ return 0;
25205+ return 1;
25206 }
25207
25208 /*
fe2de317 25209@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
df50ba0c 25210 unsigned long last_map_addr = end;
58c5fc13
MT
25211 unsigned long start_pfn, end_pfn;
25212 pgd_t *pgd_base = swapper_pg_dir;
25213- int pgd_idx, pmd_idx, pte_ofs;
25214+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25215 unsigned long pfn;
25216 pgd_t *pgd;
25217+ pud_t *pud;
25218 pmd_t *pmd;
25219 pte_t *pte;
25220 unsigned pages_2m, pages_4k;
16454cff 25221@@ -281,8 +282,13 @@ repeat:
58c5fc13
MT
25222 pfn = start_pfn;
25223 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25224 pgd = pgd_base + pgd_idx;
25225- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25226- pmd = one_md_table_init(pgd);
25227+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25228+ pud = pud_offset(pgd, 0);
25229+ pmd = pmd_offset(pud, 0);
25230+
25231+#ifdef CONFIG_X86_PAE
25232+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25233+#endif
25234
25235 if (pfn >= end_pfn)
25236 continue;
16454cff 25237@@ -294,14 +300,13 @@ repeat:
58c5fc13
MT
25238 #endif
25239 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25240 pmd++, pmd_idx++) {
25241- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25242+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25243
25244 /*
25245 * Map with big pages if possible, otherwise
25246 * create normal page tables:
25247 */
25248 if (use_pse) {
25249- unsigned int addr2;
25250 pgprot_t prot = PAGE_KERNEL_LARGE;
25251 /*
25252 * first pass will use the same initial
16454cff 25253@@ -311,11 +316,7 @@ repeat:
58c5fc13
MT
25254 __pgprot(PTE_IDENT_ATTR |
25255 _PAGE_PSE);
25256
25257- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25258- PAGE_OFFSET + PAGE_SIZE-1;
25259-
25260- if (is_kernel_text(addr) ||
25261- is_kernel_text(addr2))
25262+ if (is_kernel_text(address, address + PMD_SIZE))
25263 prot = PAGE_KERNEL_LARGE_EXEC;
25264
25265 pages_2m++;
16454cff 25266@@ -332,7 +333,7 @@ repeat:
58c5fc13
MT
25267 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25268 pte += pte_ofs;
25269 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25270- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25271+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25272 pgprot_t prot = PAGE_KERNEL;
25273 /*
25274 * first pass will use the same initial
16454cff 25275@@ -340,7 +341,7 @@ repeat:
58c5fc13
MT
25276 */
25277 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25278
25279- if (is_kernel_text(addr))
25280+ if (is_kernel_text(address, address + PAGE_SIZE))
25281 prot = PAGE_KERNEL_EXEC;
25282
25283 pages_4k++;
5e856224 25284@@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
58c5fc13
MT
25285
25286 pud = pud_offset(pgd, va);
25287 pmd = pmd_offset(pud, va);
25288- if (!pmd_present(*pmd))
25289+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25290 break;
25291
25292 pte = pte_offset_kernel(pmd, va);
5e856224 25293@@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
58c5fc13
MT
25294
25295 static void __init pagetable_init(void)
25296 {
25297- pgd_t *pgd_base = swapper_pg_dir;
25298-
25299- permanent_kmaps_init(pgd_base);
25300+ permanent_kmaps_init(swapper_pg_dir);
25301 }
25302
58c5fc13
MT
25303-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25304+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25305 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25306
25307 /* user-defined highmem size */
5e856224 25308@@ -735,6 +734,12 @@ void __init mem_init(void)
df50ba0c
MT
25309
25310 pci_iommu_alloc();
25311
25312+#ifdef CONFIG_PAX_PER_CPU_PGD
25313+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25314+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25315+ KERNEL_PGD_PTRS);
25316+#endif
25317+
25318 #ifdef CONFIG_FLATMEM
25319 BUG_ON(!mem_map);
25320 #endif
5e856224
MT
25321@@ -761,7 +766,7 @@ void __init mem_init(void)
25322 reservedpages++;
58c5fc13
MT
25323
25324 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25325- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25326+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25327 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25328
ae4e228f 25329 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
5e856224 25330@@ -802,10 +807,10 @@ void __init mem_init(void)
58c5fc13
MT
25331 ((unsigned long)&__init_end -
25332 (unsigned long)&__init_begin) >> 10,
25333
25334- (unsigned long)&_etext, (unsigned long)&_edata,
25335- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25336+ (unsigned long)&_sdata, (unsigned long)&_edata,
25337+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25338
25339- (unsigned long)&_text, (unsigned long)&_etext,
25340+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25341 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25342
25343 /*
5e856224 25344@@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
ae4e228f
MT
25345 if (!kernel_set_to_readonly)
25346 return;
58c5fc13 25347
ae4e228f
MT
25348+ start = ktla_ktva(start);
25349 pr_debug("Set kernel text: %lx - %lx for read write\n",
25350 start, start+size);
25351
5e856224 25352@@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
ae4e228f
MT
25353 if (!kernel_set_to_readonly)
25354 return;
25355
25356+ start = ktla_ktva(start);
25357 pr_debug("Set kernel text: %lx - %lx for read only\n",
25358 start, start+size);
25359
5e856224 25360@@ -925,6 +932,7 @@ void mark_rodata_ro(void)
ae4e228f
MT
25361 unsigned long start = PFN_ALIGN(_text);
25362 unsigned long size = PFN_ALIGN(_etext) - start;
25363
25364+ start = ktla_ktva(start);
25365 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25366 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25367 size >> 10);
fe2de317 25368diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
5e856224 25369index 436a030..4f97ffc 100644
fe2de317
MT
25370--- a/arch/x86/mm/init_64.c
25371+++ b/arch/x86/mm/init_64.c
25372@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
ae4e228f
MT
25373 * around without checking the pgd every time.
25374 */
25375
25376-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25377+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25378 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25379
25380 int force_personality32;
fe2de317 25381@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 25382
bc901d79
MT
25383 for (address = start; address <= end; address += PGDIR_SIZE) {
25384 const pgd_t *pgd_ref = pgd_offset_k(address);
bc901d79
MT
25385+
25386+#ifdef CONFIG_PAX_PER_CPU_PGD
25387+ unsigned long cpu;
25388+#else
25389 struct page *page;
25390+#endif
25391
25392 if (pgd_none(*pgd_ref))
25393 continue;
25394
16454cff 25395 spin_lock(&pgd_lock);
bc901d79
MT
25396+
25397+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 25398+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
bc901d79
MT
25399+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25400+#else
25401 list_for_each_entry(page, &pgd_list, lru) {
25402 pgd_t *pgd;
25403 spinlock_t *pgt_lock;
fe2de317 25404@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 25405 /* the pgt_lock only for Xen */
bc901d79
MT
25406 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25407 spin_lock(pgt_lock);
25408+#endif
25409
25410 if (pgd_none(*pgd))
25411 set_pgd(pgd, *pgd_ref);
fe2de317 25412@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
bc901d79
MT
25413 BUG_ON(pgd_page_vaddr(*pgd)
25414 != pgd_page_vaddr(*pgd_ref));
25415
25416+#ifndef CONFIG_PAX_PER_CPU_PGD
25417 spin_unlock(pgt_lock);
25418+#endif
25419+
25420 }
16454cff 25421 spin_unlock(&pgd_lock);
bc901d79 25422 }
5e856224
MT
25423@@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25424 {
25425 if (pgd_none(*pgd)) {
25426 pud_t *pud = (pud_t *)spp_getpage();
25427- pgd_populate(&init_mm, pgd, pud);
25428+ pgd_populate_kernel(&init_mm, pgd, pud);
25429 if (pud != pud_offset(pgd, 0))
25430 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25431 pud, pud_offset(pgd, 0));
25432@@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25433 {
25434 if (pud_none(*pud)) {
25435 pmd_t *pmd = (pmd_t *) spp_getpage();
25436- pud_populate(&init_mm, pud, pmd);
25437+ pud_populate_kernel(&init_mm, pud, pmd);
25438 if (pmd != pmd_offset(pud, 0))
25439 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25440 pmd, pmd_offset(pud, 0));
fe2de317 25441@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25442 pmd = fill_pmd(pud, vaddr);
25443 pte = fill_pte(pmd, vaddr);
25444
ae4e228f 25445+ pax_open_kernel();
58c5fc13 25446 set_pte(pte, new_pte);
ae4e228f 25447+ pax_close_kernel();
58c5fc13 25448
25449 /*
25450 * It's enough to flush this one mapping.
fe2de317 25451@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25452 pgd = pgd_offset_k((unsigned long)__va(phys));
25453 if (pgd_none(*pgd)) {
25454 pud = (pud_t *) spp_getpage();
25455- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25456- _PAGE_USER));
25457+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25458 }
25459 pud = pud_offset(pgd, (unsigned long)__va(phys));
25460 if (pud_none(*pud)) {
25461 pmd = (pmd_t *) spp_getpage();
25462- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25463- _PAGE_USER));
25464+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25465 }
25466 pmd = pmd_offset(pud, phys);
25467 BUG_ON(!pmd_none(*pmd));
fe2de317 25468@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25469 if (pfn >= pgt_buf_top)
25470 panic("alloc_low_page: ran out of memory");
25471
25472- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25473+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25474 clear_page(adr);
25475 *phys = pfn * PAGE_SIZE;
25476 return adr;
fe2de317 25477@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
25478
25479 phys = __pa(virt);
25480 left = phys & (PAGE_SIZE - 1);
25481- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25482+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25483 adr = (void *)(((unsigned long)adr) | left);
25484
25485 return adr;
25486@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25487 unmap_low_page(pmd);
25488
25489 spin_lock(&init_mm.page_table_lock);
25490- pud_populate(&init_mm, pud, __va(pmd_phys));
25491+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25492 spin_unlock(&init_mm.page_table_lock);
25493 }
25494 __flush_tlb_all();
25495@@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
25496 unmap_low_page(pud);
25497
25498 spin_lock(&init_mm.page_table_lock);
25499- pgd_populate(&init_mm, pgd, __va(pud_phys));
25500+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25501 spin_unlock(&init_mm.page_table_lock);
25502 pgd_changed = true;
25503 }
25504@@ -684,6 +698,12 @@ void __init mem_init(void)
25505
25506 pci_iommu_alloc();
25507
25508+#ifdef CONFIG_PAX_PER_CPU_PGD
25509+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25510+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25511+ KERNEL_PGD_PTRS);
25512+#endif
25513+
25514 /* clear_bss() already clear the empty_zero_page */
25515
25516 reservedpages = 0;
5e856224 25517@@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
25518 static struct vm_area_struct gate_vma = {
25519 .vm_start = VSYSCALL_START,
25520 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25521- .vm_page_prot = PAGE_READONLY_EXEC,
25522- .vm_flags = VM_READ | VM_EXEC
25523+ .vm_page_prot = PAGE_READONLY,
25524+ .vm_flags = VM_READ
25525 };
25526
66a7e928 25527 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
5e856224 25528@@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
25529
25530 const char *arch_vma_name(struct vm_area_struct *vma)
25531 {
25532- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25533+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25534 return "[vdso]";
25535 if (vma == &gate_vma)
25536 return "[vsyscall]";
25537diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25538index 7b179b4..6bd1777 100644
25539--- a/arch/x86/mm/iomap_32.c
25540+++ b/arch/x86/mm/iomap_32.c
25541@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
bc901d79 25542 type = kmap_atomic_idx_push();
25543 idx = type + KM_TYPE_NR * smp_processor_id();
25544 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25545+
ae4e228f 25546+ pax_open_kernel();
58c5fc13 25547 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
ae4e228f 25548+ pax_close_kernel();
25549+
25550 arch_flush_lazy_mmu_mode();
25551
25552 return (void *)vaddr;
fe2de317 25553diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
4c928ab7 25554index be1ef57..55f0160 100644
25555--- a/arch/x86/mm/ioremap.c
25556+++ b/arch/x86/mm/ioremap.c
25557@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
6892158b 25558 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25559 int is_ram = page_is_ram(pfn);
25560
25561- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25562+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25563 return NULL;
25564 WARN_ON_ONCE(is_ram);
58c5fc13 25565 }
25566@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25567
25568 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25569 if (page_is_ram(start >> PAGE_SHIFT))
25570+#ifdef CONFIG_HIGHMEM
25571+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25572+#endif
25573 return __va(phys);
25574
25575 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25576@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25577 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25578
25579 static __initdata int after_paging_init;
25580-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25581+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25582
25583 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25584 {
4c928ab7 25585@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25586 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25587
25588 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25589- memset(bm_pte, 0, sizeof(bm_pte));
25590- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25591+ pmd_populate_user(&init_mm, pmd, bm_pte);
25592
25593 /*
25594 * The boot-ioremap range spans multiple pmds, for which
25595diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25596index d87dd6d..bf3fa66 100644
25597--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25598+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25599@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25600 * memory (e.g. tracked pages)? For now, we need this to avoid
25601 * invoking kmemcheck for PnP BIOS calls.
25602 */
25603- if (regs->flags & X86_VM_MASK)
25604+ if (v8086_mode(regs))
25605 return false;
25606- if (regs->cs != __KERNEL_CS)
25607+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25608 return false;
25609
25610 pte = kmemcheck_pte_lookup(address);
fe2de317 25611diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
4c928ab7 25612index 845df68..1d8d29f 100644
25613--- a/arch/x86/mm/mmap.c
25614+++ b/arch/x86/mm/mmap.c
4c928ab7 25615@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
ae4e228f 25616 * Leave an at least ~128 MB hole with possible stack randomization.
58c5fc13 25617 */
ae4e228f 25618 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25619-#define MAX_GAP (TASK_SIZE/6*5)
25620+#define MAX_GAP (pax_task_size/6*5)
25621
25622 static int mmap_is_legacy(void)
25623 {
25624@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25625 return rnd << PAGE_SHIFT;
25626 }
25627
25628-static unsigned long mmap_base(void)
25629+static unsigned long mmap_base(struct mm_struct *mm)
25630 {
df50ba0c 25631 unsigned long gap = rlimit(RLIMIT_STACK);
25632+ unsigned long pax_task_size = TASK_SIZE;
25633+
25634+#ifdef CONFIG_PAX_SEGMEXEC
25635+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25636+ pax_task_size = SEGMEXEC_TASK_SIZE;
25637+#endif
25638
25639 if (gap < MIN_GAP)
25640 gap = MIN_GAP;
25641 else if (gap > MAX_GAP)
25642 gap = MAX_GAP;
25643
25644- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25645+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25646 }
25647
25648 /*
25649 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25650 * does, but not when emulating X86_32
25651 */
25652-static unsigned long mmap_legacy_base(void)
25653+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25654 {
25655- if (mmap_is_ia32())
25656+ if (mmap_is_ia32()) {
25657+
25658+#ifdef CONFIG_PAX_SEGMEXEC
25659+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25660+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25661+ else
25662+#endif
25663+
25664 return TASK_UNMAPPED_BASE;
25665- else
25666+ } else
25667 return TASK_UNMAPPED_BASE + mmap_rnd();
25668 }
25669
4c928ab7 25670@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25671 void arch_pick_mmap_layout(struct mm_struct *mm)
25672 {
25673 if (mmap_is_legacy()) {
25674- mm->mmap_base = mmap_legacy_base();
25675+ mm->mmap_base = mmap_legacy_base(mm);
25676+
25677+#ifdef CONFIG_PAX_RANDMMAP
25678+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25679+ mm->mmap_base += mm->delta_mmap;
25680+#endif
25681+
25682 mm->get_unmapped_area = arch_get_unmapped_area;
25683 mm->unmap_area = arch_unmap_area;
25684 } else {
25685- mm->mmap_base = mmap_base();
25686+ mm->mmap_base = mmap_base(mm);
25687+
25688+#ifdef CONFIG_PAX_RANDMMAP
25689+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25690+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25691+#endif
25692+
25693 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25694 mm->unmap_area = arch_unmap_area_topdown;
25695 }
fe2de317 25696diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
5e856224 25697index dc0b727..dc9d71a 100644
25698--- a/arch/x86/mm/mmio-mod.c
25699+++ b/arch/x86/mm/mmio-mod.c
4c928ab7 25700@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25701 break;
25702 default:
25703 {
25704- unsigned char *ip = (unsigned char *)instptr;
25705+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25706 my_trace->opcode = MMIO_UNKNOWN_OP;
25707 my_trace->width = 0;
25708 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
4c928ab7 25709@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25710 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25711 void __iomem *addr)
25712 {
25713- static atomic_t next_id;
25714+ static atomic_unchecked_t next_id;
25715 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25716 /* These are page-unaligned. */
25717 struct mmiotrace_map map = {
4c928ab7 25718@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25719 .private = trace
25720 },
25721 .phys = offset,
25722- .id = atomic_inc_return(&next_id)
25723+ .id = atomic_inc_return_unchecked(&next_id)
25724 };
25725 map.map_id = trace->id;
25726
25727diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25728index b008656..773eac2 100644
25729--- a/arch/x86/mm/pageattr-test.c
25730+++ b/arch/x86/mm/pageattr-test.c
25731@@ -36,7 +36,7 @@ enum {
25732
25733 static int pte_testbit(pte_t pte)
25734 {
25735- return pte_flags(pte) & _PAGE_UNUSED1;
25736+ return pte_flags(pte) & _PAGE_CPA_TEST;
25737 }
25738
25739 struct split_state {
25740diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
5e856224 25741index e1ebde3..b1e1db38 100644
25742--- a/arch/x86/mm/pageattr.c
25743+++ b/arch/x86/mm/pageattr.c
25744@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
df50ba0c 25745 */
25746 #ifdef CONFIG_PCI_BIOS
25747 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25748- pgprot_val(forbidden) |= _PAGE_NX;
25749+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
16454cff 25750 #endif
25751
25752 /*
fe2de317 25753@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25754 * Does not cover __inittext since that is gone later on. On
25755 * 64bit we do not enforce !NX on the low mapping
25756 */
25757- if (within(address, (unsigned long)_text, (unsigned long)_etext))
df50ba0c 25758- pgprot_val(forbidden) |= _PAGE_NX;
58c5fc13 25759+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
df50ba0c 25760+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25761
25762+#ifdef CONFIG_DEBUG_RODATA
25763 /*
25764 * The .rodata section needs to be read-only. Using the pfn
25765 * catches all aliases.
fe2de317 25766@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25767 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25768 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25769 pgprot_val(forbidden) |= _PAGE_RW;
25770+#endif
25771
25772 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25773 /*
fe2de317 25774@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25775 }
25776 #endif
25777
25778+#ifdef CONFIG_PAX_KERNEXEC
25779+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25780+ pgprot_val(forbidden) |= _PAGE_RW;
25781+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25782+ }
25783+#endif
25784+
25785 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25786
25787 return prot;
16454cff 25788@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25789 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25790 {
58c5fc13 25791 /* change init_mm */
ae4e228f 25792+ pax_open_kernel();
58c5fc13 25793 set_pte_atomic(kpte, pte);
25794+
25795 #ifdef CONFIG_X86_32
25796 if (!SHARED_KERNEL_PMD) {
25797+
25798+#ifdef CONFIG_PAX_PER_CPU_PGD
25799+ unsigned long cpu;
25800+#else
58c5fc13 25801 struct page *page;
25802+#endif
25803
25804+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 25805+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25806+ pgd_t *pgd = get_cpu_pgd(cpu);
25807+#else
25808 list_for_each_entry(page, &pgd_list, lru) {
25809- pgd_t *pgd;
25810+ pgd_t *pgd = (pgd_t *)page_address(page);
25811+#endif
25812+
25813 pud_t *pud;
25814 pmd_t *pmd;
25815
25816- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25817+ pgd += pgd_index(address);
25818 pud = pud_offset(pgd, address);
25819 pmd = pmd_offset(pud, address);
25820 set_pte_atomic((pte_t *)pmd, pte);
25821 }
25822 }
25823 #endif
25824+ pax_close_kernel();
25825 }
25826
25827 static int
25828diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25829index f6ff57b..481690f 100644
25830--- a/arch/x86/mm/pat.c
25831+++ b/arch/x86/mm/pat.c
25832@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25833
25834 if (!entry) {
25835 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25836- current->comm, current->pid, start, end);
25837+ current->comm, task_pid_nr(current), start, end);
57199397 25838 return -EINVAL;
25839 }
25840
fe2de317 25841@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25842 while (cursor < to) {
25843 if (!devmem_is_allowed(pfn)) {
25844 printk(KERN_INFO
25845- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25846- current->comm, from, to);
25847+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25848+ current->comm, from, to, cursor);
25849 return 0;
25850 }
25851 cursor += PAGE_SIZE;
fe2de317 25852@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25853 printk(KERN_INFO
25854 "%s:%d ioremap_change_attr failed %s "
25855 "for %Lx-%Lx\n",
25856- current->comm, current->pid,
25857+ current->comm, task_pid_nr(current),
25858 cattr_name(flags),
25859 base, (unsigned long long)(base + size));
25860 return -EINVAL;
fe2de317 25861@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25862 if (want_flags != flags) {
25863 printk(KERN_WARNING
25864 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25865- current->comm, current->pid,
25866+ current->comm, task_pid_nr(current),
25867 cattr_name(want_flags),
25868 (unsigned long long)paddr,
25869 (unsigned long long)(paddr + size),
fe2de317 25870@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25871 free_memtype(paddr, paddr + size);
25872 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25873 " for %Lx-%Lx, got %s\n",
25874- current->comm, current->pid,
25875+ current->comm, task_pid_nr(current),
25876 cattr_name(want_flags),
25877 (unsigned long long)paddr,
25878 (unsigned long long)(paddr + size),
25879diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25880index 9f0614d..92ae64a 100644
25881--- a/arch/x86/mm/pf_in.c
25882+++ b/arch/x86/mm/pf_in.c
25883@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25884 int i;
25885 enum reason_type rv = OTHERS;
25886
25887- p = (unsigned char *)ins_addr;
25888+ p = (unsigned char *)ktla_ktva(ins_addr);
25889 p += skip_prefix(p, &prf);
25890 p += get_opcode(p, &opcode);
25891
fe2de317 25892@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25893 struct prefix_bits prf;
25894 int i;
25895
25896- p = (unsigned char *)ins_addr;
25897+ p = (unsigned char *)ktla_ktva(ins_addr);
25898 p += skip_prefix(p, &prf);
25899 p += get_opcode(p, &opcode);
25900
fe2de317 25901@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25902 struct prefix_bits prf;
25903 int i;
25904
25905- p = (unsigned char *)ins_addr;
25906+ p = (unsigned char *)ktla_ktva(ins_addr);
25907 p += skip_prefix(p, &prf);
25908 p += get_opcode(p, &opcode);
25909
fe2de317 25910@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25911 struct prefix_bits prf;
25912 int i;
25913
25914- p = (unsigned char *)ins_addr;
25915+ p = (unsigned char *)ktla_ktva(ins_addr);
25916 p += skip_prefix(p, &prf);
25917 p += get_opcode(p, &opcode);
25918 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
fe2de317 25919@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25920 struct prefix_bits prf;
25921 int i;
25922
25923- p = (unsigned char *)ins_addr;
25924+ p = (unsigned char *)ktla_ktva(ins_addr);
25925 p += skip_prefix(p, &prf);
25926 p += get_opcode(p, &opcode);
25927 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
fe2de317 25928diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
5e856224 25929index 8573b83..7d9628f 100644
25930--- a/arch/x86/mm/pgtable.c
25931+++ b/arch/x86/mm/pgtable.c
5e856224 25932@@ -84,10 +84,60 @@ static inline void pgd_list_del(pgd_t *pgd)
25933 list_del(&page->lru);
25934 }
25935
25936-#define UNSHARED_PTRS_PER_PGD \
25937- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25938+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25939+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
bc901d79 25940
25941+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25942+{
25943+ while (count--)
bc901d79 25944+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25945+}
25946+#endif
fe2de317 25947
25948+#ifdef CONFIG_PAX_PER_CPU_PGD
25949+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25950+{
25951+ while (count--) {
25952+ pgd_t pgd;
df50ba0c 25953+
25954+#ifdef CONFIG_X86_64
25955+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
df50ba0c 25956+#else
25957+ pgd = *src++;
25958+#endif
25959+
25960+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25961+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
df50ba0c 25962+#endif
fe2de317 25963+
25964+ *dst++ = pgd;
25965+ }
25966+
25967+}
25968+#endif
25969+
25970+#ifdef CONFIG_X86_64
25971+#define pxd_t pud_t
25972+#define pyd_t pgd_t
25973+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25974+#define pxd_free(mm, pud) pud_free((mm), (pud))
25975+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
4c928ab7 25976+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25977+#define PYD_SIZE PGDIR_SIZE
25978+#else
25979+#define pxd_t pmd_t
25980+#define pyd_t pud_t
25981+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25982+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25983+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
4c928ab7 25984+#define pyd_offset(mm, address) pud_offset((mm), (address))
25985+#define PYD_SIZE PUD_SIZE
25986+#endif
25987+
25988+#ifdef CONFIG_PAX_PER_CPU_PGD
25989+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25990+static inline void pgd_dtor(pgd_t *pgd) {}
df50ba0c 25991+#else
bc901d79 25992 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
df50ba0c 25993 {
66a7e928 25994 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
5e856224 25995@@ -128,6 +178,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c 25996 pgd_list_del(pgd);
16454cff 25997 spin_unlock(&pgd_lock);
25998 }
25999+#endif
26000
26001 /*
26002 * List of all pgd's needed for non-PAE so it can invalidate entries
5e856224 26003@@ -140,7 +191,7 @@ static void pgd_dtor(pgd_t *pgd)
26004 * -- wli
26005 */
26006
26007-#ifdef CONFIG_X86_PAE
26008+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26009 /*
26010 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26011 * updating the top-level pagetable entries to guarantee the
5e856224 26012@@ -152,7 +203,7 @@ static void pgd_dtor(pgd_t *pgd)
26013 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26014 * and initialize the kernel pmds here.
26015 */
26016-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26017+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26018
26019 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26020 {
5e856224 26021@@ -170,36 +221,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26022 */
26023 flush_tlb_mm(mm);
26024 }
26025+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26026+#define PREALLOCATED_PXDS USER_PGD_PTRS
26027 #else /* !CONFIG_X86_PAE */
26028
26029 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26030-#define PREALLOCATED_PMDS 0
26031+#define PREALLOCATED_PXDS 0
26032
26033 #endif /* CONFIG_X86_PAE */
26034
26035-static void free_pmds(pmd_t *pmds[])
26036+static void free_pxds(pxd_t *pxds[])
26037 {
26038 int i;
26039
26040- for(i = 0; i < PREALLOCATED_PMDS; i++)
26041- if (pmds[i])
26042- free_page((unsigned long)pmds[i]);
26043+ for(i = 0; i < PREALLOCATED_PXDS; i++)
26044+ if (pxds[i])
26045+ free_page((unsigned long)pxds[i]);
26046 }
26047
26048-static int preallocate_pmds(pmd_t *pmds[])
26049+static int preallocate_pxds(pxd_t *pxds[])
26050 {
26051 int i;
26052 bool failed = false;
26053
26054- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26055- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26056- if (pmd == NULL)
26057+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26058+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26059+ if (pxd == NULL)
26060 failed = true;
26061- pmds[i] = pmd;
26062+ pxds[i] = pxd;
26063 }
26064
26065 if (failed) {
26066- free_pmds(pmds);
26067+ free_pxds(pxds);
26068 return -ENOMEM;
26069 }
26070
5e856224 26071@@ -212,51 +265,55 @@ static int preallocate_pmds(pmd_t *pmds[])
26072 * preallocate which never got a corresponding vma will need to be
26073 * freed manually.
26074 */
26075-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26076+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26077 {
26078 int i;
26079
26080- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26081+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26082 pgd_t pgd = pgdp[i];
26083
26084 if (pgd_val(pgd) != 0) {
26085- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26086+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26087
26088- pgdp[i] = native_make_pgd(0);
26089+ set_pgd(pgdp + i, native_make_pgd(0));
26090
26091- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26092- pmd_free(mm, pmd);
26093+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26094+ pxd_free(mm, pxd);
26095 }
26096 }
26097 }
26098
26099-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26100+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26101 {
26102- pud_t *pud;
26103+ pyd_t *pyd;
26104 unsigned long addr;
26105 int i;
26106
26107- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26108+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26109 return;
26110
26111- pud = pud_offset(pgd, 0);
26112+#ifdef CONFIG_X86_64
26113+ pyd = pyd_offset(mm, 0L);
26114+#else
26115+ pyd = pyd_offset(pgd, 0L);
26116+#endif
26117
26118- for (addr = i = 0; i < PREALLOCATED_PMDS;
26119- i++, pud++, addr += PUD_SIZE) {
26120- pmd_t *pmd = pmds[i];
26121+ for (addr = i = 0; i < PREALLOCATED_PXDS;
26122+ i++, pyd++, addr += PYD_SIZE) {
26123+ pxd_t *pxd = pxds[i];
26124
26125 if (i >= KERNEL_PGD_BOUNDARY)
26126- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26127- sizeof(pmd_t) * PTRS_PER_PMD);
26128+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26129+ sizeof(pxd_t) * PTRS_PER_PMD);
26130
26131- pud_populate(mm, pud, pmd);
26132+ pyd_populate(mm, pyd, pxd);
26133 }
26134 }
26135
26136 pgd_t *pgd_alloc(struct mm_struct *mm)
26137 {
26138 pgd_t *pgd;
26139- pmd_t *pmds[PREALLOCATED_PMDS];
26140+ pxd_t *pxds[PREALLOCATED_PXDS];
26141
26142 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
16454cff 26143
5e856224 26144@@ -265,11 +322,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26145
26146 mm->pgd = pgd;
26147
26148- if (preallocate_pmds(pmds) != 0)
26149+ if (preallocate_pxds(pxds) != 0)
26150 goto out_free_pgd;
26151
26152 if (paravirt_pgd_alloc(mm) != 0)
26153- goto out_free_pmds;
26154+ goto out_free_pxds;
26155
26156 /*
26157 * Make sure that pre-populating the pmds is atomic with
5e856224 26158@@ -279,14 +336,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
16454cff 26159 spin_lock(&pgd_lock);
df50ba0c 26160
bc901d79 26161 pgd_ctor(mm, pgd);
26162- pgd_prepopulate_pmd(mm, pgd, pmds);
26163+ pgd_prepopulate_pxd(mm, pgd, pxds);
26164
16454cff 26165 spin_unlock(&pgd_lock);
26166
26167 return pgd;
26168
26169-out_free_pmds:
26170- free_pmds(pmds);
26171+out_free_pxds:
26172+ free_pxds(pxds);
26173 out_free_pgd:
26174 free_page((unsigned long)pgd);
26175 out:
5e856224 26176@@ -295,7 +352,7 @@ out:
26177
26178 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26179 {
26180- pgd_mop_up_pmds(mm, pgd);
26181+ pgd_mop_up_pxds(mm, pgd);
26182 pgd_dtor(pgd);
26183 paravirt_pgd_free(mm, pgd);
26184 free_page((unsigned long)pgd);
26185diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26186index cac7184..09a39fa 100644
26187--- a/arch/x86/mm/pgtable_32.c
26188+++ b/arch/x86/mm/pgtable_32.c
26189@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26190 return;
26191 }
26192 pte = pte_offset_kernel(pmd, vaddr);
26193+
26194+ pax_open_kernel();
26195 if (pte_val(pteval))
26196 set_pte_at(&init_mm, vaddr, pte, pteval);
26197 else
26198 pte_clear(&init_mm, vaddr, pte);
26199+ pax_close_kernel();
26200
26201 /*
26202 * It's enough to flush this one mapping.
26203diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26204index 410531d..0f16030 100644
26205--- a/arch/x86/mm/setup_nx.c
26206+++ b/arch/x86/mm/setup_nx.c
efbe55a5 26207@@ -5,8 +5,10 @@
26208 #include <asm/pgtable.h>
26209 #include <asm/proto.h>
26210
26211+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26212 static int disable_nx __cpuinitdata;
df50ba0c 26213
efbe55a5 26214+#ifndef CONFIG_PAX_PAGEEXEC
26215 /*
26216 * noexec = on|off
26217 *
fe2de317 26218@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
26219 return 0;
26220 }
26221 early_param("noexec", noexec_setup);
26222+#endif
26223+
26224+#endif
26225
26226 void __cpuinit x86_configure_nx(void)
26227 {
26228+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26229 if (cpu_has_nx && !disable_nx)
26230 __supported_pte_mask |= _PAGE_NX;
26231 else
26232+#endif
26233 __supported_pte_mask &= ~_PAGE_NX;
26234 }
26235
26236diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26237index d6c0418..06a0ad5 100644
26238--- a/arch/x86/mm/tlb.c
26239+++ b/arch/x86/mm/tlb.c
bc901d79 26240@@ -65,7 +65,11 @@ void leave_mm(int cpu)
26241 BUG();
26242 cpumask_clear_cpu(cpu,
26243 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26244+
26245+#ifndef CONFIG_PAX_PER_CPU_PGD
26246 load_cr3(swapper_pg_dir);
26247+#endif
26248+
26249 }
26250 EXPORT_SYMBOL_GPL(leave_mm);
26251
26252diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
26253index 6687022..ceabcfa 100644
26254--- a/arch/x86/net/bpf_jit.S
26255+++ b/arch/x86/net/bpf_jit.S
26256@@ -9,6 +9,7 @@
26257 */
26258 #include <linux/linkage.h>
26259 #include <asm/dwarf2.h>
26260+#include <asm/alternative-asm.h>
26261
26262 /*
26263 * Calling convention :
26264@@ -35,6 +36,7 @@ sk_load_word:
26265 jle bpf_slow_path_word
26266 mov (SKBDATA,%rsi),%eax
26267 bswap %eax /* ntohl() */
26268+ pax_force_retaddr
26269 ret
26270
26271
26272@@ -53,6 +55,7 @@ sk_load_half:
26273 jle bpf_slow_path_half
26274 movzwl (SKBDATA,%rsi),%eax
26275 rol $8,%ax # ntohs()
26276+ pax_force_retaddr
26277 ret
26278
26279 sk_load_byte_ind:
26280@@ -66,6 +69,7 @@ sk_load_byte:
26281 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
26282 jle bpf_slow_path_byte
26283 movzbl (SKBDATA,%rsi),%eax
26284+ pax_force_retaddr
26285 ret
26286
26287 /**
26288@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
26289 movzbl (SKBDATA,%rsi),%ebx
26290 and $15,%bl
26291 shl $2,%bl
26292+ pax_force_retaddr
26293 ret
26294 CFI_ENDPROC
26295 ENDPROC(sk_load_byte_msh)
26296@@ -91,6 +96,7 @@ bpf_error:
26297 xor %eax,%eax
26298 mov -8(%rbp),%rbx
26299 leaveq
26300+ pax_force_retaddr
26301 ret
26302
26303 /* rsi contains offset and can be scratched */
26304@@ -113,6 +119,7 @@ bpf_slow_path_word:
26305 js bpf_error
26306 mov -12(%rbp),%eax
26307 bswap %eax
26308+ pax_force_retaddr
26309 ret
26310
26311 bpf_slow_path_half:
26312@@ -121,12 +128,14 @@ bpf_slow_path_half:
26313 mov -12(%rbp),%ax
26314 rol $8,%ax
26315 movzwl %ax,%eax
26316+ pax_force_retaddr
26317 ret
26318
26319 bpf_slow_path_byte:
26320 bpf_slow_path_common(1)
26321 js bpf_error
26322 movzbl -12(%rbp),%eax
26323+ pax_force_retaddr
26324 ret
26325
26326 bpf_slow_path_byte_msh:
26327@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
26328 and $15,%al
26329 shl $2,%al
26330 xchg %eax,%ebx
26331+ pax_force_retaddr
26332 ret
fe2de317 26333diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
5e856224 26334index 5a5b6e4..201d42e 100644
26335--- a/arch/x86/net/bpf_jit_comp.c
26336+++ b/arch/x86/net/bpf_jit_comp.c
26337@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
26338 set_fs(old_fs);
26339 }
26340
26341+struct bpf_jit_work {
26342+ struct work_struct work;
26343+ void *image;
26344+};
26345
26346 void bpf_jit_compile(struct sk_filter *fp)
26347 {
26348@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26349 if (addrs == NULL)
26350 return;
26351
26352+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26353+ if (!fp->work)
26354+ goto out;
26355+
26356 /* Before first pass, make a rough estimation of addrs[]
26357 * each bpf instruction is translated to less than 64 bytes
26358 */
5e856224 26359@@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
4c928ab7 26360 common_load: seen |= SEEN_DATAREF;
26361 if ((int)K < 0) {
26362 /* Abort the JIT because __load_pointer() is needed. */
26363- goto out;
26364+ goto error;
5e856224 26365 }
26366 t_offset = func - (image + addrs[i]);
26367 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
26368@@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
26369 case BPF_S_LDX_B_MSH:
26370 if ((int)K < 0) {
26371 /* Abort the JIT because __load_pointer() is needed. */
26372- goto out;
26373+ goto error;
26374 }
26375 seen |= SEEN_DATAREF | SEEN_XREG;
26376 t_offset = sk_load_byte_msh - (image + addrs[i]);
26377@@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26378 break;
26379 default:
26380 /* hmm, too complex filter, give up with jit compiler */
26381- goto out;
26382+ goto error;
26383 }
26384 ilen = prog - temp;
26385 if (image) {
26386 if (unlikely(proglen + ilen > oldproglen)) {
26387 pr_err("bpb_jit_compile fatal error\n");
26388- kfree(addrs);
26389- module_free(NULL, image);
26390- return;
26391+ module_free_exec(NULL, image);
4c928ab7 26392+ goto error;
26393 }
26394+ pax_open_kernel();
26395 memcpy(image + proglen, temp, ilen);
26396+ pax_close_kernel();
26397 }
26398 proglen += ilen;
26399 addrs[i] = proglen;
5e856224 26400@@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26401 break;
26402 }
26403 if (proglen == oldproglen) {
26404- image = module_alloc(max_t(unsigned int,
26405- proglen,
26406- sizeof(struct work_struct)));
26407+ image = module_alloc_exec(proglen);
fe2de317 26408 if (!image)
26409- goto out;
26410+ goto error;
26411 }
26412 oldproglen = proglen;
fe2de317 26413 }
5e856224 26414@@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26415 bpf_flush_icache(image, image + proglen);
26416
26417 fp->bpf_func = (void *)image;
26418- }
26419+ } else
26420+error:
26421+ kfree(fp->work);
26422+
fe2de317 26423 out:
26424 kfree(addrs);
26425 return;
5e856224 26426@@ -641,18 +651,20 @@ out:
26427
26428 static void jit_free_defer(struct work_struct *arg)
26429 {
26430- module_free(NULL, arg);
4c928ab7 26431+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26432+ kfree(arg);
26433 }
26434
26435 /* run from softirq, we must use a work_struct to call
26436- * module_free() from process context
26437+ * module_free_exec() from process context
26438 */
26439 void bpf_jit_free(struct sk_filter *fp)
26440 {
26441 if (fp->bpf_func != sk_run_filter) {
26442- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26443+ struct work_struct *work = &fp->work->work;
26444
26445 INIT_WORK(work, jit_free_defer);
26446+ fp->work->image = fp->bpf_func;
26447 schedule_work(work);
26448 }
26449 }
26450diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26451index bff89df..377758a 100644
26452--- a/arch/x86/oprofile/backtrace.c
26453+++ b/arch/x86/oprofile/backtrace.c
26454@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26455 struct stack_frame_ia32 *fp;
26456 unsigned long bytes;
26457
26458- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26459+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26460 if (bytes != sizeof(bufhead))
26461 return NULL;
26462
26463- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26464+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26465
26466 oprofile_add_trace(bufhead[0].return_address);
26467
fe2de317 26468@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26469 struct stack_frame bufhead[2];
26470 unsigned long bytes;
26471
26472- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26473+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26474 if (bytes != sizeof(bufhead))
26475 return NULL;
26476
fe2de317 26477@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
58c5fc13 26478 {
bc901d79 26479 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26480
26481- if (!user_mode_vm(regs)) {
26482+ if (!user_mode(regs)) {
26483 unsigned long stack = kernel_stack_pointer(regs);
26484 if (depth)
66a7e928 26485 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26486diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26487index cb29191..036766d 100644
26488--- a/arch/x86/pci/mrst.c
26489+++ b/arch/x86/pci/mrst.c
26490@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
26491 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
26492 pci_mmcfg_late_init();
26493 pcibios_enable_irq = mrst_pci_irq_enable;
26494- pci_root_ops = pci_mrst_ops;
26495+ pax_open_kernel();
26496+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26497+ pax_close_kernel();
26498 /* Continue with standard init */
26499 return 1;
ae4e228f 26500 }
fe2de317 26501diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
5e856224 26502index da8fe05..7ee6704 100644
26503--- a/arch/x86/pci/pcbios.c
26504+++ b/arch/x86/pci/pcbios.c
16454cff 26505@@ -79,50 +79,93 @@ union bios32 {
26506 static struct {
26507 unsigned long address;
26508 unsigned short segment;
26509-} bios32_indirect = { 0, __KERNEL_CS };
26510+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26511
26512 /*
26513 * Returns the entry point for the given service, NULL on error
26514 */
26515
26516-static unsigned long bios32_service(unsigned long service)
26517+static unsigned long __devinit bios32_service(unsigned long service)
26518 {
26519 unsigned char return_code; /* %al */
26520 unsigned long address; /* %ebx */
26521 unsigned long length; /* %ecx */
26522 unsigned long entry; /* %edx */
26523 unsigned long flags;
26524+ struct desc_struct d, *gdt;
26525
26526 local_irq_save(flags);
26527- __asm__("lcall *(%%edi); cld"
26528+
26529+ gdt = get_cpu_gdt_table(smp_processor_id());
26530+
26531+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26532+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26533+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26534+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26535+
26536+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26537 : "=a" (return_code),
26538 "=b" (address),
26539 "=c" (length),
26540 "=d" (entry)
26541 : "0" (service),
26542 "1" (0),
26543- "D" (&bios32_indirect));
26544+ "D" (&bios32_indirect),
26545+ "r"(__PCIBIOS_DS)
26546+ : "memory");
26547+
ae4e228f 26548+ pax_open_kernel();
26549+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26550+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26551+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26552+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
ae4e228f 26553+ pax_close_kernel();
26554+
26555 local_irq_restore(flags);
26556
26557 switch (return_code) {
26558- case 0:
26559- return address + entry;
26560- case 0x80: /* Not present */
26561- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26562- return 0;
26563- default: /* Shouldn't happen */
26564- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26565- service, return_code);
26566+ case 0: {
26567+ int cpu;
26568+ unsigned char flags;
26569+
26570+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26571+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26572+ printk(KERN_WARNING "bios32_service: not valid\n");
26573 return 0;
26574+ }
26575+ address = address + PAGE_OFFSET;
26576+ length += 16UL; /* some BIOSs underreport this... */
26577+ flags = 4;
26578+ if (length >= 64*1024*1024) {
26579+ length >>= PAGE_SHIFT;
26580+ flags |= 8;
26581+ }
26582+
4c928ab7 26583+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26584+ gdt = get_cpu_gdt_table(cpu);
26585+ pack_descriptor(&d, address, length, 0x9b, flags);
26586+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26587+ pack_descriptor(&d, address, length, 0x93, flags);
26588+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26589+ }
26590+ return entry;
26591+ }
26592+ case 0x80: /* Not present */
26593+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26594+ return 0;
26595+ default: /* Shouldn't happen */
26596+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26597+ service, return_code);
26598+ return 0;
26599 }
26600 }
26601
26602 static struct {
26603 unsigned long address;
26604 unsigned short segment;
26605-} pci_indirect = { 0, __KERNEL_CS };
26606+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26607
26608-static int pci_bios_present;
26609+static int pci_bios_present __read_only;
26610
26611 static int __devinit check_pcibios(void)
26612 {
16454cff 26613@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26614 unsigned long flags, pcibios_entry;
26615
26616 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26617- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26618+ pci_indirect.address = pcibios_entry;
26619
26620 local_irq_save(flags);
26621- __asm__(
26622- "lcall *(%%edi); cld\n\t"
26623+ __asm__("movw %w6, %%ds\n\t"
26624+ "lcall *%%ss:(%%edi); cld\n\t"
26625+ "push %%ss\n\t"
26626+ "pop %%ds\n\t"
26627 "jc 1f\n\t"
26628 "xor %%ah, %%ah\n"
26629 "1:"
16454cff 26630@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26631 "=b" (ebx),
26632 "=c" (ecx)
26633 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26634- "D" (&pci_indirect)
26635+ "D" (&pci_indirect),
26636+ "r" (__PCIBIOS_DS)
26637 : "memory");
26638 local_irq_restore(flags);
26639
fe2de317 26640@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26641
26642 switch (len) {
26643 case 1:
26644- __asm__("lcall *(%%esi); cld\n\t"
26645+ __asm__("movw %w6, %%ds\n\t"
26646+ "lcall *%%ss:(%%esi); cld\n\t"
26647+ "push %%ss\n\t"
26648+ "pop %%ds\n\t"
26649 "jc 1f\n\t"
26650 "xor %%ah, %%ah\n"
26651 "1:"
fe2de317 26652@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26653 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26654 "b" (bx),
26655 "D" ((long)reg),
26656- "S" (&pci_indirect));
26657+ "S" (&pci_indirect),
26658+ "r" (__PCIBIOS_DS));
26659 /*
26660 * Zero-extend the result beyond 8 bits, do not trust the
26661 * BIOS having done it:
fe2de317 26662@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26663 *value &= 0xff;
26664 break;
26665 case 2:
26666- __asm__("lcall *(%%esi); cld\n\t"
26667+ __asm__("movw %w6, %%ds\n\t"
26668+ "lcall *%%ss:(%%esi); cld\n\t"
26669+ "push %%ss\n\t"
26670+ "pop %%ds\n\t"
26671 "jc 1f\n\t"
26672 "xor %%ah, %%ah\n"
26673 "1:"
fe2de317 26674@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26675 : "1" (PCIBIOS_READ_CONFIG_WORD),
26676 "b" (bx),
26677 "D" ((long)reg),
26678- "S" (&pci_indirect));
26679+ "S" (&pci_indirect),
26680+ "r" (__PCIBIOS_DS));
26681 /*
26682 * Zero-extend the result beyond 16 bits, do not trust the
26683 * BIOS having done it:
fe2de317 26684@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26685 *value &= 0xffff;
26686 break;
26687 case 4:
26688- __asm__("lcall *(%%esi); cld\n\t"
26689+ __asm__("movw %w6, %%ds\n\t"
26690+ "lcall *%%ss:(%%esi); cld\n\t"
26691+ "push %%ss\n\t"
26692+ "pop %%ds\n\t"
26693 "jc 1f\n\t"
26694 "xor %%ah, %%ah\n"
26695 "1:"
fe2de317 26696@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26697 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26698 "b" (bx),
26699 "D" ((long)reg),
26700- "S" (&pci_indirect));
26701+ "S" (&pci_indirect),
26702+ "r" (__PCIBIOS_DS));
26703 break;
26704 }
26705
fe2de317 26706@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26707
26708 switch (len) {
26709 case 1:
26710- __asm__("lcall *(%%esi); cld\n\t"
26711+ __asm__("movw %w6, %%ds\n\t"
26712+ "lcall *%%ss:(%%esi); cld\n\t"
26713+ "push %%ss\n\t"
26714+ "pop %%ds\n\t"
26715 "jc 1f\n\t"
26716 "xor %%ah, %%ah\n"
26717 "1:"
fe2de317 26718@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26719 "c" (value),
26720 "b" (bx),
26721 "D" ((long)reg),
26722- "S" (&pci_indirect));
26723+ "S" (&pci_indirect),
26724+ "r" (__PCIBIOS_DS));
26725 break;
26726 case 2:
26727- __asm__("lcall *(%%esi); cld\n\t"
26728+ __asm__("movw %w6, %%ds\n\t"
26729+ "lcall *%%ss:(%%esi); cld\n\t"
26730+ "push %%ss\n\t"
26731+ "pop %%ds\n\t"
26732 "jc 1f\n\t"
26733 "xor %%ah, %%ah\n"
26734 "1:"
fe2de317 26735@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26736 "c" (value),
26737 "b" (bx),
26738 "D" ((long)reg),
26739- "S" (&pci_indirect));
26740+ "S" (&pci_indirect),
26741+ "r" (__PCIBIOS_DS));
26742 break;
26743 case 4:
26744- __asm__("lcall *(%%esi); cld\n\t"
26745+ __asm__("movw %w6, %%ds\n\t"
26746+ "lcall *%%ss:(%%esi); cld\n\t"
26747+ "push %%ss\n\t"
26748+ "pop %%ds\n\t"
26749 "jc 1f\n\t"
26750 "xor %%ah, %%ah\n"
26751 "1:"
fe2de317 26752@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26753 "c" (value),
26754 "b" (bx),
26755 "D" ((long)reg),
26756- "S" (&pci_indirect));
26757+ "S" (&pci_indirect),
26758+ "r" (__PCIBIOS_DS));
26759 break;
26760 }
26761
fe2de317 26762@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26763
26764 DBG("PCI: Fetching IRQ routing table... ");
26765 __asm__("push %%es\n\t"
26766+ "movw %w8, %%ds\n\t"
26767 "push %%ds\n\t"
26768 "pop %%es\n\t"
26769- "lcall *(%%esi); cld\n\t"
26770+ "lcall *%%ss:(%%esi); cld\n\t"
26771 "pop %%es\n\t"
26772+ "push %%ss\n\t"
26773+ "pop %%ds\n"
26774 "jc 1f\n\t"
26775 "xor %%ah, %%ah\n"
26776 "1:"
fe2de317 26777@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26778 "1" (0),
26779 "D" ((long) &opt),
26780 "S" (&pci_indirect),
26781- "m" (opt)
26782+ "m" (opt),
26783+ "r" (__PCIBIOS_DS)
26784 : "memory");
26785 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26786 if (ret & 0xff00)
fe2de317 26787@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26788 {
26789 int ret;
26790
26791- __asm__("lcall *(%%esi); cld\n\t"
26792+ __asm__("movw %w5, %%ds\n\t"
26793+ "lcall *%%ss:(%%esi); cld\n\t"
26794+ "push %%ss\n\t"
26795+ "pop %%ds\n"
26796 "jc 1f\n\t"
26797 "xor %%ah, %%ah\n"
26798 "1:"
fe2de317 26799@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26800 : "0" (PCIBIOS_SET_PCI_HW_INT),
26801 "b" ((dev->bus->number << 8) | dev->devfn),
26802 "c" ((irq << 8) | (pin + 10)),
26803- "S" (&pci_indirect));
26804+ "S" (&pci_indirect),
26805+ "r" (__PCIBIOS_DS));
26806 return !(ret & 0xff00);
26807 }
26808 EXPORT_SYMBOL(pcibios_set_irq_routing);
fe2de317 26809diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
4c928ab7 26810index 40e4469..1ab536e 100644
26811--- a/arch/x86/platform/efi/efi_32.c
26812+++ b/arch/x86/platform/efi/efi_32.c
4c928ab7 26813@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
bc901d79 26814 {
26815 struct desc_ptr gdt_descr;
26816
26817+#ifdef CONFIG_PAX_KERNEXEC
26818+ struct desc_struct d;
26819+#endif
26820+
26821 local_irq_save(efi_rt_eflags);
bc901d79 26822
4c928ab7 26823 load_cr3(initial_page_table);
26824 __flush_tlb_all();
26825
26826+#ifdef CONFIG_PAX_KERNEXEC
26827+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
6e9df6a3 26828+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15a11c5b 26829+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
6e9df6a3 26830+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26831+#endif
26832+
26833 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26834 gdt_descr.size = GDT_SIZE - 1;
26835 load_gdt(&gdt_descr);
4c928ab7 26836@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
bc901d79 26837 {
26838 struct desc_ptr gdt_descr;
26839
26840+#ifdef CONFIG_PAX_KERNEXEC
26841+ struct desc_struct d;
26842+
26843+ memset(&d, 0, sizeof d);
26844+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26845+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26846+#endif
26847+
26848 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26849 gdt_descr.size = GDT_SIZE - 1;
26850 load_gdt(&gdt_descr);
26851diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26852index fbe66e6..c5c0dd2 100644
26853--- a/arch/x86/platform/efi/efi_stub_32.S
26854+++ b/arch/x86/platform/efi/efi_stub_32.S
15a11c5b 26855@@ -6,7 +6,9 @@
26856 */
26857
26858 #include <linux/linkage.h>
26859+#include <linux/init.h>
26860 #include <asm/page_types.h>
15a11c5b 26861+#include <asm/segment.h>
26862
26863 /*
26864 * efi_call_phys(void *, ...) is a function with variable parameters.
26865@@ -20,7 +22,7 @@
26866 * service functions will comply with gcc calling convention, too.
26867 */
26868
26869-.text
26870+__INIT
26871 ENTRY(efi_call_phys)
26872 /*
26873 * 0. The function can only be called in Linux kernel. So CS has been
15a11c5b 26874@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26875 * The mapping of lower virtual memory has been created in prelog and
26876 * epilog.
26877 */
26878- movl $1f, %edx
26879- subl $__PAGE_OFFSET, %edx
26880- jmp *%edx
26881+ movl $(__KERNEXEC_EFI_DS), %edx
26882+ mov %edx, %ds
26883+ mov %edx, %es
26884+ mov %edx, %ss
26885+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26886 1:
26887
26888 /*
15a11c5b 26889@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26890 * parameter 2, ..., param n. To make things easy, we save the return
26891 * address of efi_call_phys in a global variable.
26892 */
26893- popl %edx
26894- movl %edx, saved_return_addr
26895- /* get the function pointer into ECX*/
26896- popl %ecx
26897- movl %ecx, efi_rt_function_ptr
26898- movl $2f, %edx
26899- subl $__PAGE_OFFSET, %edx
26900- pushl %edx
26901+ popl (saved_return_addr)
26902+ popl (efi_rt_function_ptr)
26903
26904 /*
26905 * 3. Clear PG bit in %CR0.
15a11c5b 26906@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26907 /*
26908 * 5. Call the physical function.
26909 */
26910- jmp *%ecx
26911+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26912
26913-2:
26914 /*
26915 * 6. After EFI runtime service returns, control will return to
26916 * following instruction. We'd better readjust stack pointer first.
15a11c5b 26917@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26918 movl %cr0, %edx
26919 orl $0x80000000, %edx
26920 movl %edx, %cr0
26921- jmp 1f
26922-1:
26923+
26924 /*
26925 * 8. Now restore the virtual mode from flat mode by
26926 * adding EIP with PAGE_OFFSET.
26927 */
26928- movl $1f, %edx
26929- jmp *%edx
15a11c5b 26930+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
bc901d79 26931 1:
26932+ movl $(__KERNEL_DS), %edx
26933+ mov %edx, %ds
26934+ mov %edx, %es
26935+ mov %edx, %ss
26936
26937 /*
26938 * 9. Balance the stack. And because EAX contain the return value,
26939 * we'd better not clobber it.
26940 */
26941- leal efi_rt_function_ptr, %edx
26942- movl (%edx), %ecx
26943- pushl %ecx
26944+ pushl (efi_rt_function_ptr)
26945
26946 /*
26947- * 10. Push the saved return address onto the stack and return.
26948+ * 10. Return to the saved return address.
26949 */
26950- leal saved_return_addr, %edx
26951- movl (%edx), %ecx
26952- pushl %ecx
26953- ret
26954+ jmpl *(saved_return_addr)
26955 ENDPROC(efi_call_phys)
26956 .previous
26957
26958-.data
26959+__INITDATA
26960 saved_return_addr:
26961 .long 0
26962 efi_rt_function_ptr:
26963diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26964index 4c07cca..2c8427d 100644
26965--- a/arch/x86/platform/efi/efi_stub_64.S
26966+++ b/arch/x86/platform/efi/efi_stub_64.S
26967@@ -7,6 +7,7 @@
26968 */
26969
26970 #include <linux/linkage.h>
26971+#include <asm/alternative-asm.h>
26972
26973 #define SAVE_XMM \
26974 mov %rsp, %rax; \
26975@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26976 call *%rdi
26977 addq $32, %rsp
26978 RESTORE_XMM
fe2de317 26979+ pax_force_retaddr 0, 1
26980 ret
26981 ENDPROC(efi_call0)
26982
6e9df6a3 26983@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26984 call *%rdi
26985 addq $32, %rsp
26986 RESTORE_XMM
fe2de317 26987+ pax_force_retaddr 0, 1
26988 ret
26989 ENDPROC(efi_call1)
26990
6e9df6a3 26991@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26992 call *%rdi
26993 addq $32, %rsp
26994 RESTORE_XMM
fe2de317 26995+ pax_force_retaddr 0, 1
26996 ret
26997 ENDPROC(efi_call2)
26998
6e9df6a3 26999@@ -71,6 +75,7 @@ ENTRY(efi_call3)
27000 call *%rdi
27001 addq $32, %rsp
27002 RESTORE_XMM
fe2de317 27003+ pax_force_retaddr 0, 1
27004 ret
27005 ENDPROC(efi_call3)
27006
6e9df6a3 27007@@ -83,6 +88,7 @@ ENTRY(efi_call4)
27008 call *%rdi
27009 addq $32, %rsp
27010 RESTORE_XMM
fe2de317 27011+ pax_force_retaddr 0, 1
27012 ret
27013 ENDPROC(efi_call4)
27014
6e9df6a3 27015@@ -96,6 +102,7 @@ ENTRY(efi_call5)
27016 call *%rdi
27017 addq $48, %rsp
27018 RESTORE_XMM
fe2de317 27019+ pax_force_retaddr 0, 1
27020 ret
27021 ENDPROC(efi_call5)
27022
6e9df6a3 27023@@ -112,5 +119,6 @@ ENTRY(efi_call6)
27024 call *%rdi
27025 addq $48, %rsp
27026 RESTORE_XMM
fe2de317 27027+ pax_force_retaddr 0, 1
27028 ret
27029 ENDPROC(efi_call6)
fe2de317 27030diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
5e856224 27031index 475e2cd..1b8e708 100644
27032--- a/arch/x86/platform/mrst/mrst.c
27033+++ b/arch/x86/platform/mrst/mrst.c
4c928ab7
MT
27034@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
27035 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
27036 int sfi_mrtc_num;
15a11c5b 27037
27038-static void mrst_power_off(void)
27039+static __noreturn void mrst_power_off(void)
27040 {
27041 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
27042 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
27043+ BUG();
27044 }
27045
27046-static void mrst_reboot(void)
27047+static __noreturn void mrst_reboot(void)
27048 {
27049 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
27050 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
27051 else
27052 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
15a11c5b 27053+ BUG();
27054 }
27055
4c928ab7 27056 /* parse all the mtimer info to a static mtimer array */
fe2de317 27057diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
5e856224 27058index 3ae0e61..4202d86 100644
27059--- a/arch/x86/platform/uv/tlb_uv.c
27060+++ b/arch/x86/platform/uv/tlb_uv.c
5e856224 27061@@ -1424,6 +1424,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
27062 * 0: display meaning of the statistics
27063 */
27064 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
27065+ size_t count, loff_t *data) __size_overflow(3);
27066+static ssize_t ptc_proc_write(struct file *file, const char __user *user,
27067 size_t count, loff_t *data)
27068 {
27069 int cpu;
5e856224 27070@@ -1539,6 +1541,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
27071 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
27072 */
27073 static ssize_t tunables_write(struct file *file, const char __user *user,
27074+ size_t count, loff_t *data) __size_overflow(3);
27075+static ssize_t tunables_write(struct file *file, const char __user *user,
27076 size_t count, loff_t *data)
27077 {
27078 int cpu;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index f10c0af..3ec1f95 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -131,7 +131,7 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
 int cpu = smp_processor_id();
- struct tss_struct *t = &per_cpu(init_tss, cpu);
+ struct tss_struct *t = init_tss + cpu;

 set_tss_desc(cpu, t); /*
 * This just modifies memory; should not be
@@ -141,7 +141,9 @@ static void fix_processor_context(void)
 */

 #ifdef CONFIG_X86_64
+ pax_open_kernel();
 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+ pax_close_kernel();

 syscall_init(); /* This sets MSR_*STAR and related */
 #endif
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5d17950..2253fc9 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'

-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 GCOV_PROFILE := n

 #
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 468d591..8e80a0a 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -25,6 +25,7 @@
 #include <asm/tlbflush.h>
 #include <asm/vdso.h>
 #include <asm/proto.h>
+#include <asm/mman.h>

 enum {
 VDSO_DISABLED = 0,
@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
 void enable_sep_cpu(void)
 {
 int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss = init_tss + cpu;

 if (!boot_cpu_has(X86_FEATURE_SEP)) {
 put_cpu();
@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
 gate_vma.vm_start = FIXADDR_USER_START;
 gate_vma.vm_end = FIXADDR_USER_END;
 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 /*
 * Make sure the vDSO gets into every core dump.
 * Dumping its contents makes post-mortem fully interpretable later
@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 if (compat)
 addr = VDSO_HIGH_BASE;
 else {
- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
 if (IS_ERR_VALUE(addr)) {
 ret = addr;
 goto up_fail;
 }
 }

- current->mm->context.vdso = (void *)addr;
+ current->mm->context.vdso = addr;

 if (compat_uses_vma || !compat) {
 /*
@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 }

 current_thread_info()->sysenter_return =
- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);

 up_fail:
 if (ret)
- current->mm->context.vdso = NULL;
+ current->mm->context.vdso = 0;

 up_write(&mm->mmap_sem);

@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);

 const char *arch_vma_name(struct vm_area_struct *vma)
 {
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
 return "[vdso]";
+
+#ifdef CONFIG_PAX_SEGMEXEC
+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+#endif
+
 return NULL;
 }

@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 * Check to see if the corresponding task was created in compat vdso
 * mode.
 */
- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
 return &gate_vma;
 return NULL;
 }
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 153407c..611cba9 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -16,8 +16,6 @@
 #include <asm/vdso.h>
 #include <asm/page.h>

-unsigned int __read_mostly vdso_enabled = 1;
-
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;

@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 * unaligned here as a result of stack start randomization.
 */
 addr = PAGE_ALIGN(addr);
- addr = align_addr(addr, NULL, ALIGN_VDSO);

 return addr;
 }
@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 struct mm_struct *mm = current->mm;
- unsigned long addr;
+ unsigned long addr = 0;
 int ret;

- if (!vdso_enabled)
- return 0;
-
 down_write(&mm->mmap_sem);
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 addr = vdso_addr(mm->start_stack, vdso_size);
+ addr = align_addr(addr, NULL, ALIGN_VDSO);
 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
 if (IS_ERR_VALUE(addr)) {
 ret = addr;
 goto up_fail;
 }

- current->mm->context.vdso = (void *)addr;
+ mm->context.vdso = addr;

 ret = install_special_mapping(mm, addr, vdso_size,
 VM_READ|VM_EXEC|
 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
 VM_ALWAYSDUMP,
 vdso_pages);
- if (ret) {
- current->mm->context.vdso = NULL;
- goto up_fail;
- }
+
+ if (ret)
+ mm->context.vdso = 0;

 up_fail:
 up_write(&mm->mmap_sem);
 return ret;
 }
-
-static __init int vdso_setup(char *s)
-{
- vdso_enabled = simple_strtoul(s, NULL, 0);
- return 0;
-}
-__setup("vdso=", vdso_setup);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4e517d4..68a48f5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);

 struct shared_info xen_dummy_shared_info;

-void *xen_initial_gdt;
-
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
 #endif
 };

-static void xen_reboot(int reason)
+static __noreturn void xen_reboot(int reason)
 {
 struct sched_shutdown r = { .reason = reason };

- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
- BUG();
+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+ BUG();
 }

-static void xen_restart(char *msg)
+static __noreturn void xen_restart(char *msg)
 {
 xen_reboot(SHUTDOWN_reboot);
 }

-static void xen_emergency_restart(void)
+static __noreturn void xen_emergency_restart(void)
 {
 xen_reboot(SHUTDOWN_reboot);
 }

-static void xen_machine_halt(void)
+static __noreturn void xen_machine_halt(void)
 {
 xen_reboot(SHUTDOWN_poweroff);
 }

-static void xen_machine_power_off(void)
+static __noreturn void xen_machine_power_off(void)
 {
 if (pm_power_off)
 pm_power_off();
@@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void)
 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;

 /* Work out if we support NX */
- x86_configure_nx();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+ unsigned l, h;
+
+ __supported_pte_mask |= _PAGE_NX;
+ rdmsr(MSR_EFER, l, h);
+ l |= EFER_NX;
+ wrmsr(MSR_EFER, l, h);
+ }
+#endif

 xen_setup_features();

@@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void)

 machine_ops = xen_machine_ops;

- /*
- * The only reliable way to retain the initial address of the
- * percpu gdt_page is to remember it here, so we can go and
- * mark it RW later, when the initial percpu area is freed.
- */
- xen_initial_gdt = &per_cpu(gdt_page, 0);
-
 xen_smp_init();

 #ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc19347..1b07a2c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 convert_pfn_mfn(init_level4_pgt);
 convert_pfn_mfn(level3_ident_pgt);
 convert_pfn_mfn(level3_kernel_pgt);
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
+ convert_pfn_mfn(level3_vmemmap_pgt);

 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

@@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void)
 pv_mmu_ops.set_pud = xen_set_pud;
 #if PAGETABLE_LEVELS == 4
 pv_mmu_ops.set_pgd = xen_set_pgd;
+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
 #endif

 /* This will work as long as patching hasn't happened yet
@@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
 .set_pgd = xen_set_pgd_hyper,
+ .set_pgd_batched = xen_set_pgd_hyper,

 .alloc_pud = xen_alloc_pmd_init,
 .release_pud = xen_release_pmd_init,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index f2ce60a..14e08dc 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
 {
 BUG_ON(smp_processor_id() != 0);
 native_smp_prepare_boot_cpu();
-
- /* We've switched to the "real" per-cpu gdt, so make sure the
- old memory can be recycled */
- make_lowmem_page_readwrite(xen_initial_gdt);
-
 xen_filter_cpu_maps();
 xen_setup_vcpu_info_placement();
 }
@@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 gdt = get_cpu_gdt_table(cpu);

 ctxt->flags = VGCF_IN_KERNEL;
- ctxt->user_regs.ds = __USER_DS;
- ctxt->user_regs.es = __USER_DS;
+ ctxt->user_regs.ds = __KERNEL_DS;
+ ctxt->user_regs.es = __KERNEL_DS;
 ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 ctxt->user_regs.fs = __KERNEL_PERCPU;
- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+ savesegment(gs, ctxt->user_regs.gs);
 #else
 ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
@@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 int rc;

 per_cpu(current_task, cpu) = idle;
+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
 #ifdef CONFIG_X86_32
 irq_ctx_init(cpu);
 #else
 clear_tsk_thread_flag(idle, TIF_FORK);
- per_cpu(kernel_stack, cpu) =
- (unsigned long)task_stack_page(idle) -
- KERNEL_STACK_OFFSET + THREAD_SIZE;
+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
 #endif
 xen_setup_runstate_info(cpu);
 xen_setup_timer(cpu);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index b040b0e..8cc4fe0 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -83,14 +83,14 @@ ENTRY(xen_iret)
 ESP_OFFSET=4 # bytes pushed onto stack

 /*
- * Store vcpu_info pointer for easy access. Do it this way to
- * avoid having to reload %fs
+ * Store vcpu_info pointer for easy access.
 */
 #ifdef CONFIG_SMP
- GET_THREAD_INFO(%eax)
- movl TI_cpu(%eax), %eax
- movl __per_cpu_offset(,%eax,4), %eax
- mov xen_vcpu(%eax), %eax
+ push %fs
+ mov $(__KERNEL_PERCPU), %eax
+ mov %eax, %fs
+ mov PER_CPU_VAR(xen_vcpu), %eax
+ pop %fs
 #else
 movl xen_vcpu, %eax
 #endif
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index aaa7291..3f77960 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -19,6 +19,17 @@ ENTRY(startup_xen)
 #ifdef CONFIG_X86_32
 mov %esi,xen_start_info
 mov $init_thread_union+THREAD_SIZE,%esp
+#ifdef CONFIG_SMP
+ movl $cpu_gdt_table,%edi
+ movl $__per_cpu_load,%eax
+ movw %ax,__KERNEL_PERCPU + 2(%edi)
+ rorl $16,%eax
+ movb %al,__KERNEL_PERCPU + 4(%edi)
+ movb %ah,__KERNEL_PERCPU + 7(%edi)
+ movl $__per_cpu_end - 1,%eax
+ subl $__per_cpu_start,%eax
+ movw %ax,__KERNEL_PERCPU + 0(%edi)
+#endif
 #else
 mov %rsi,xen_start_info
 mov $init_thread_union+THREAD_SIZE,%rsp
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b095739..8c17bcd 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,8 +10,6 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];

-extern void *xen_initial_gdt;
-
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);

diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
index 525bd3d..ef888b1 100644
--- a/arch/xtensa/variants/dc232b/include/variant/core.h
+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
@@ -119,9 +119,9 @@
 ----------------------------------------------------------------------*/

 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */

 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
index 2f33760..835e50a 100644
--- a/arch/xtensa/variants/fsf/include/variant/core.h
+++ b/arch/xtensa/variants/fsf/include/variant/core.h
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_CORE_H
 #define _XTENSA_CORE_H

+#include <linux/const.h>

 /****************************************************************************
 Parameters Useful for Any Code, USER or PRIVILEGED
@@ -112,9 +113,9 @@
 ----------------------------------------------------------------------*/

 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */

 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
index af00795..2bb8105 100644
--- a/arch/xtensa/variants/s6000/include/variant/core.h
+++ b/arch/xtensa/variants/s6000/include/variant/core.h
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_CORE_CONFIGURATION_H
 #define _XTENSA_CORE_CONFIGURATION_H

+#include <linux/const.h>

 /****************************************************************************
 Parameters Useful for Any Code, USER or PRIVILEGED
@@ -118,9 +119,9 @@
 ----------------------------------------------------------------------*/

 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */

 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916af..9cb880b 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
 }
 EXPORT_SYMBOL(blk_iopoll_complete);

-static void blk_iopoll_softirq(struct softirq_action *h)
+static void blk_iopoll_softirq(void)
 {
 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
 int rearm = 0, budget = blk_iopoll_budget;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd..ca1e109 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 if (!len || !kbuf)
 return -EINVAL;

- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
 if (do_copy)
 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 else
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89..e17f54b 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 * Softirq action handler - move entries to local list and loop over them
 * while passing them to the queue registered handler.
 */
-static void blk_done_softirq(struct softirq_action *h)
+static void blk_done_softirq(void)
 {
 struct list_head *cpu_list, local_list;

diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3..593560c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 struct sg_io_v4 *hdr, struct bsg_device *bd,
 fmode_t has_write_perm)
 {
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
+ unsigned char *cmdptr;
+
 if (hdr->request_len > BLK_MAX_CDB) {
 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
 if (!rq->cmd)
 return -ENOMEM;
- }
+ cmdptr = rq->cmd;
+ } else
+ cmdptr = tmpcmd;

- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
 hdr->request_len))
 return -EFAULT;

+ if (cmdptr != rq->cmd)
+ memcpy(rq->cmd, cmdptr, hdr->request_len);
+
 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
 if (blk_verify_command(rq->cmd, has_write_perm))
 return -EPERM;
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7c668c8..db3521c 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
 err |= __get_user(f->spec1, &uf->spec1);
 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
 err |= __get_user(name, &uf->name);
- f->name = compat_ptr(name);
+ f->name = (void __force_kernel *)compat_ptr(name);
 if (err) {
 err = -EFAULT;
 goto out;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 6296b40..417c00f 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
 if (!gpt)
 return NULL;

+ if (!le32_to_cpu(gpt->num_partition_entries))
+ return NULL;
+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
 count = le32_to_cpu(gpt->num_partition_entries) *
 le32_to_cpu(gpt->sizeof_partition_entry);
- if (!count)
- return NULL;
- pte = kzalloc(count, GFP_KERNEL);
- if (!pte)
- return NULL;
-
 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
 (u8 *) pte,
 count) < count) {
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 260fa80..e8f3caf 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 struct sg_io_hdr *hdr, fmode_t mode)
 {
- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
+ unsigned char *cmdptr;
+
+ if (rq->cmd != rq->__cmd)
+ cmdptr = rq->cmd;
+ else
+ cmdptr = tmpcmd;
+
+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
 return -EFAULT;
+
+ if (cmdptr != rq->cmd)
+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
+
 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
 return -EPERM;

@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 int err;
 unsigned int in_len, out_len, bytes, opcode, cmdlen;
 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
+ unsigned char *cmdptr;

 if (!sic)
 return -EINVAL;
@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 */
 err = -EFAULT;
 rq->cmd_len = cmdlen;
-	if (copy_from_user(rq->cmd, sic->data, cmdlen))
+
+ if (rq->cmd != rq->__cmd)
+ cmdptr = rq->cmd;
+ else
+ cmdptr = tmpcmd;
+
+ if (copy_from_user(cmdptr, sic->data, cmdlen))
 goto error;

+ if (rq->cmd != cmdptr)
+ memcpy(rq->cmd, cmdptr, cmdlen);
+
 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 goto error;

diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c..1da9c73 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req,
 EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen) __size_overflow(3);
+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
 unsigned int keylen)
 {
 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
@@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
 }

 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen) __size_overflow(3);
+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 unsigned int keylen)
 {
 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dc..983032f 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -27,6 +27,8 @@
 #include "internal.h"

 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen) __size_overflow(3);
+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 unsigned int keylen)
 {
 struct aead_alg *aead = crypto_aead_alg(tfm);
@@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 return ret;
 }

+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 {
 struct aead_alg *aead = crypto_aead_alg(tfm);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 1e61d1a..cf06b86 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen) __size_overflow(3);
+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
 unsigned int keylen)
 {
 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
@@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
 return ret;
 }

+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 {
 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 39541e0..802d956 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -21,6 +21,8 @@
 #include "internal.h"

 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen) __size_overflow(3);
+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
 unsigned int keylen)
 {
 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,

 }

+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 {
 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 671d4d6..5f24030 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {

 struct cryptd_blkcipher_request_ctx {
 crypto_completion_t complete;
-};
+} __no_const;

 struct cryptd_hash_ctx {
 struct crypto_shash *child;
@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {

 struct cryptd_aead_request_ctx {
 crypto_completion_t complete;
-};
+} __no_const;

 static void cryptd_queue_worker(struct work_struct *work);

diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 5d41894..22021e4 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -38,12 +38,12 @@
 */
 u64 cper_next_record_id(void)
 {
- static atomic64_t seq;
+ static atomic64_unchecked_t seq;

- if (!atomic64_read(&seq))
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
+ if (!atomic64_read_unchecked(&seq))
+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);

- return atomic64_inc_return(&seq);
+ return atomic64_inc_return_unchecked(&seq);
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);

diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca..5cb1a69 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)

 static ssize_t acpi_battery_write_alarm(struct file *file,
 const char __user * buffer,
+ size_t count, loff_t * ppos) __size_overflow(3);
+static ssize_t acpi_battery_write_alarm(struct file *file,
+ const char __user * buffer,
 size_t count, loff_t * ppos)
 {
 int result = 0;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index b258cab..3fb7da7 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -12,6 +12,7 @@
 #include <linux/acpi.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include "internal.h"

 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
 */
 unsigned int size = EC_SPACE_SIZE;
- u8 *data = (u8 *) buf;
+ u8 data;
 loff_t init_off = *off;
 int err = 0;

@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
 size = count;

 while (size) {
- err = ec_read(*off, &data[*off - init_off]);
+ err = ec_read(*off, &data);
 if (err)
 return err;
+ if (put_user(data, &buf[*off - init_off]))
+ return -EFAULT;
 *off += 1;
 size--;
 }
@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,

 unsigned int size = count;
 loff_t init_off = *off;
- u8 *data = (u8 *) buf;
 int err = 0;

 if (*off >= EC_SPACE_SIZE)
@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
 }

 while (size) {
- u8 byte_write = data[*off - init_off];
+ u8 byte_write;
+ if (get_user(byte_write, &buf[*off - init_off]))
+ return -EFAULT;
 err = ec_write(*off, byte_write);
 if (err)
 return err;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 251c7b62..000462d 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
 size_t count, loff_t * ppos)
 {
 struct list_head *node, *next;
- char strbuf[5];
- char str[5] = "";
- unsigned int len = count;
+ char strbuf[5] = {0};

- if (len > 4)
- len = 4;
- if (len < 0)
+ if (count > 4)
+ count = 4;
+ if (copy_from_user(strbuf, buffer, count))
 return -EFAULT;
-
- if (copy_from_user(strbuf, buffer, len))
- return -EFAULT;
- strbuf[len] = '\0';
- sscanf(strbuf, "%s", str);
+ strbuf[count] = '\0';

 mutex_lock(&acpi_device_lock);
 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
 if (!dev->wakeup.flags.valid)
 continue;

- if (!strncmp(dev->pnp.bus_id, str, 4)) {
+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
 if (device_can_wakeup(&dev->dev)) {
 bool enable = !device_may_wakeup(&dev->dev);
 device_set_wakeup_enable(&dev->dev, enable);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 8ae05ce..7dbbed9 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 return 0;
 #endif

- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+ BUG_ON(pr->id >= nr_cpu_ids);

 /*
 * Buggy BIOS check
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e36d0c..f319944 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)

 static ssize_t
 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos) __size_overflow(3);
+static ssize_t
+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
 size_t count, loff_t * ppos)
 {
 struct seq_file *seq = file->private_data;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c06e0ec..a2c06ba 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 struct ata_port *ap;
 unsigned int tag;

- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 ap = qc->ap;

 qc->flags = 0;
@@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
 struct ata_port *ap;
 struct ata_link *link;

- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
 ap = qc->ap;
 link = qc->dev->link;
@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
 return;

 spin_lock(&lock);
+ pax_open_kernel();

 for (cur = ops->inherits; cur; cur = cur->inherits) {
 void **inherit = (void **)cur;
@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
 if (IS_ERR(*pp))
 *pp = NULL;

- ops->inherits = NULL;
+ *(struct ata_port_operations **)&ops->inherits = NULL;

+ pax_close_kernel();
 spin_unlock(&lock);
 }

diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 048589f..4002b98 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
 /* Handle platform specific quirks */
 if (pdata->quirk) {
 if (pdata->quirk & CF_BROKEN_PIO) {
- ap->ops->set_piomode = NULL;
+ pax_open_kernel();
+ *(void **)&ap->ops->set_piomode = NULL;
+ pax_close_kernel();
 ap->pio_mask = 0;
 }
 if (pdata->quirk & CF_BROKEN_MWDMA)
fe2de317
MT
28048diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
28049index f9b983a..887b9d8 100644
28050--- a/drivers/atm/adummy.c
28051+++ b/drivers/atm/adummy.c
28052@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28053 vcc->pop(vcc, skb);
28054 else
28055 dev_kfree_skb_any(skb);
28056- atomic_inc(&vcc->stats->tx);
28057+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 28058
15a11c5b
MT
28059 return 0;
28060 }
fe2de317
MT
28061diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
28062index f8f41e0..1f987dd 100644
28063--- a/drivers/atm/ambassador.c
28064+++ b/drivers/atm/ambassador.c
28065@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
15a11c5b
MT
28066 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
28067
28068 // VC layer stats
28069- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28070+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28071
28072 // free the descriptor
28073 kfree (tx_descr);
fe2de317 28074@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
15a11c5b
MT
28075 dump_skb ("<<<", vc, skb);
28076
28077 // VC layer stats
28078- atomic_inc(&atm_vcc->stats->rx);
28079+ atomic_inc_unchecked(&atm_vcc->stats->rx);
28080 __net_timestamp(skb);
28081 // end of our responsibility
28082 atm_vcc->push (atm_vcc, skb);
fe2de317 28083@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
15a11c5b
MT
28084 } else {
28085 PRINTK (KERN_INFO, "dropped over-size frame");
28086 // should we count this?
28087- atomic_inc(&atm_vcc->stats->rx_drop);
28088+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28089 }
28090
28091 } else {
fe2de317 28092@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
15a11c5b
MT
28093 }
28094
28095 if (check_area (skb->data, skb->len)) {
28096- atomic_inc(&atm_vcc->stats->tx_err);
28097+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
28098 return -ENOMEM; // ?
28099 }
28100
fe2de317
MT
28101diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
28102index b22d71c..d6e1049 100644
28103--- a/drivers/atm/atmtcp.c
28104+++ b/drivers/atm/atmtcp.c
28105@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
28106 if (vcc->pop) vcc->pop(vcc,skb);
28107 else dev_kfree_skb(skb);
28108 if (dev_data) return 0;
28109- atomic_inc(&vcc->stats->tx_err);
28110+ atomic_inc_unchecked(&vcc->stats->tx_err);
28111 return -ENOLINK;
28112 }
28113 size = skb->len+sizeof(struct atmtcp_hdr);
fe2de317 28114@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
28115 if (!new_skb) {
28116 if (vcc->pop) vcc->pop(vcc,skb);
28117 else dev_kfree_skb(skb);
28118- atomic_inc(&vcc->stats->tx_err);
28119+ atomic_inc_unchecked(&vcc->stats->tx_err);
28120 return -ENOBUFS;
28121 }
28122 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
fe2de317 28123@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
28124 if (vcc->pop) vcc->pop(vcc,skb);
28125 else dev_kfree_skb(skb);
28126 out_vcc->push(out_vcc,new_skb);
28127- atomic_inc(&vcc->stats->tx);
28128- atomic_inc(&out_vcc->stats->rx);
28129+ atomic_inc_unchecked(&vcc->stats->tx);
28130+ atomic_inc_unchecked(&out_vcc->stats->rx);
28131 return 0;
28132 }
ae4e228f 28133
fe2de317 28134@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
28135 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
28136 read_unlock(&vcc_sklist_lock);
28137 if (!out_vcc) {
28138- atomic_inc(&vcc->stats->tx_err);
28139+ atomic_inc_unchecked(&vcc->stats->tx_err);
28140 goto done;
28141 }
28142 skb_pull(skb,sizeof(struct atmtcp_hdr));
fe2de317 28143@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
28144 __net_timestamp(new_skb);
28145 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
28146 out_vcc->push(out_vcc,new_skb);
28147- atomic_inc(&vcc->stats->tx);
28148- atomic_inc(&out_vcc->stats->rx);
28149+ atomic_inc_unchecked(&vcc->stats->tx);
28150+ atomic_inc_unchecked(&out_vcc->stats->rx);
28151 done:
28152 if (vcc->pop) vcc->pop(vcc,skb);
28153 else dev_kfree_skb(skb);
fe2de317 28154diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
4c928ab7 28155index 956e9ac..133516d 100644
fe2de317
MT
28156--- a/drivers/atm/eni.c
28157+++ b/drivers/atm/eni.c
15a11c5b
MT
28158@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
28159 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
28160 vcc->dev->number);
28161 length = 0;
28162- atomic_inc(&vcc->stats->rx_err);
28163+ atomic_inc_unchecked(&vcc->stats->rx_err);
28164 }
28165 else {
28166 length = ATM_CELL_SIZE-1; /* no HEC */
28167@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28168 size);
28169 }
28170 eff = length = 0;
28171- atomic_inc(&vcc->stats->rx_err);
28172+ atomic_inc_unchecked(&vcc->stats->rx_err);
28173 }
28174 else {
28175 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
28176@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28177 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
28178 vcc->dev->number,vcc->vci,length,size << 2,descr);
28179 length = eff = 0;
28180- atomic_inc(&vcc->stats->rx_err);
28181+ atomic_inc_unchecked(&vcc->stats->rx_err);
28182 }
28183 }
28184 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
28185@@ -771,7 +771,7 @@ rx_dequeued++;
28186 vcc->push(vcc,skb);
28187 pushed++;
28188 }
28189- atomic_inc(&vcc->stats->rx);
28190+ atomic_inc_unchecked(&vcc->stats->rx);
28191 }
28192 wake_up(&eni_dev->rx_wait);
28193 }
4c928ab7 28194@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
15a11c5b
MT
28195 PCI_DMA_TODEVICE);
28196 if (vcc->pop) vcc->pop(vcc,skb);
28197 else dev_kfree_skb_irq(skb);
28198- atomic_inc(&vcc->stats->tx);
28199+ atomic_inc_unchecked(&vcc->stats->tx);
28200 wake_up(&eni_dev->tx_wait);
28201 dma_complete++;
28202 }
4c928ab7 28203@@ -1569,7 +1569,7 @@ tx_complete++;
6e9df6a3
MT
28204 /*--------------------------------- entries ---------------------------------*/
28205
28206
28207-static const char *media_name[] __devinitdata = {
28208+static const char *media_name[] __devinitconst = {
28209 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
28210 "UTP", "05?", "06?", "07?", /* 4- 7 */
28211 "TAXI","09?", "10?", "11?", /* 8-11 */
fe2de317 28212diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
4c928ab7 28213index 5072f8a..fa52520d 100644
fe2de317
MT
28214--- a/drivers/atm/firestream.c
28215+++ b/drivers/atm/firestream.c
28216@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
28217 }
28218 }
ae4e228f 28219
15a11c5b
MT
28220- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28221+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
ae4e228f 28222
15a11c5b
MT
28223 fs_dprintk (FS_DEBUG_TXMEM, "i");
28224 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
fe2de317 28225@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
28226 #endif
28227 skb_put (skb, qe->p1 & 0xffff);
28228 ATM_SKB(skb)->vcc = atm_vcc;
28229- atomic_inc(&atm_vcc->stats->rx);
28230+ atomic_inc_unchecked(&atm_vcc->stats->rx);
28231 __net_timestamp(skb);
28232 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
28233 atm_vcc->push (atm_vcc, skb);
fe2de317 28234@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
28235 kfree (pe);
28236 }
28237 if (atm_vcc)
28238- atomic_inc(&atm_vcc->stats->rx_drop);
28239+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28240 break;
28241 case 0x1f: /* Reassembly abort: no buffers. */
28242 /* Silently increment error counter. */
28243 if (atm_vcc)
28244- atomic_inc(&atm_vcc->stats->rx_drop);
28245+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28246 break;
28247 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28248 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
fe2de317
MT
28249diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28250index 361f5ae..7fc552d 100644
28251--- a/drivers/atm/fore200e.c
28252+++ b/drivers/atm/fore200e.c
28253@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
15a11c5b
MT
28254 #endif
28255 /* check error condition */
28256 if (*entry->status & STATUS_ERROR)
28257- atomic_inc(&vcc->stats->tx_err);
28258+ atomic_inc_unchecked(&vcc->stats->tx_err);
28259 else
28260- atomic_inc(&vcc->stats->tx);
28261+ atomic_inc_unchecked(&vcc->stats->tx);
28262 }
28263 }
ae4e228f 28264
fe2de317 28265@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
15a11c5b
MT
28266 if (skb == NULL) {
28267 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
ae4e228f 28268
15a11c5b
MT
28269- atomic_inc(&vcc->stats->rx_drop);
28270+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28271 return -ENOMEM;
28272 }
ae4e228f 28273
fe2de317 28274@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
ae4e228f 28275
15a11c5b 28276 dev_kfree_skb_any(skb);
ae4e228f 28277
15a11c5b
MT
28278- atomic_inc(&vcc->stats->rx_drop);
28279+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28280 return -ENOMEM;
28281 }
ae4e228f 28282
15a11c5b 28283 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 28284
15a11c5b
MT
28285 vcc->push(vcc, skb);
28286- atomic_inc(&vcc->stats->rx);
28287+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28288
15a11c5b 28289 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 28290
fe2de317 28291@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
15a11c5b
MT
28292 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28293 fore200e->atm_dev->number,
28294 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28295- atomic_inc(&vcc->stats->rx_err);
28296+ atomic_inc_unchecked(&vcc->stats->rx_err);
28297 }
28298 }
ae4e228f 28299
fe2de317 28300@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28301 goto retry_here;
28302 }
ae4e228f 28303
15a11c5b
MT
28304- atomic_inc(&vcc->stats->tx_err);
28305+ atomic_inc_unchecked(&vcc->stats->tx_err);
ae4e228f 28306
15a11c5b
MT
28307 fore200e->tx_sat++;
28308 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
fe2de317 28309diff --git a/drivers/atm/he.c b/drivers/atm/he.c
5e856224 28310index b182c2f..1c6fa8a 100644
fe2de317
MT
28311--- a/drivers/atm/he.c
28312+++ b/drivers/atm/he.c
28313@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
ae4e228f 28314
15a11c5b
MT
28315 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28316 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28317- atomic_inc(&vcc->stats->rx_drop);
28318+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28319 goto return_host_buffers;
28320 }
ae4e228f 28321
fe2de317 28322@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
15a11c5b
MT
28323 RBRQ_LEN_ERR(he_dev->rbrq_head)
28324 ? "LEN_ERR" : "",
28325 vcc->vpi, vcc->vci);
28326- atomic_inc(&vcc->stats->rx_err);
28327+ atomic_inc_unchecked(&vcc->stats->rx_err);
28328 goto return_host_buffers;
28329 }
ae4e228f 28330
fe2de317 28331@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
15a11c5b
MT
28332 vcc->push(vcc, skb);
28333 spin_lock(&he_dev->global_lock);
ae4e228f 28334
15a11c5b
MT
28335- atomic_inc(&vcc->stats->rx);
28336+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28337
15a11c5b
MT
28338 return_host_buffers:
28339 ++pdus_assembled;
fe2de317 28340@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
15a11c5b
MT
28341 tpd->vcc->pop(tpd->vcc, tpd->skb);
28342 else
28343 dev_kfree_skb_any(tpd->skb);
28344- atomic_inc(&tpd->vcc->stats->tx_err);
28345+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28346 }
28347 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28348 return;
fe2de317 28349@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28350 vcc->pop(vcc, skb);
28351 else
28352 dev_kfree_skb_any(skb);
28353- atomic_inc(&vcc->stats->tx_err);
28354+ atomic_inc_unchecked(&vcc->stats->tx_err);
28355 return -EINVAL;
28356 }
ae4e228f 28357
fe2de317 28358@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28359 vcc->pop(vcc, skb);
28360 else
28361 dev_kfree_skb_any(skb);
28362- atomic_inc(&vcc->stats->tx_err);
28363+ atomic_inc_unchecked(&vcc->stats->tx_err);
28364 return -EINVAL;
ae4e228f 28365 }
15a11c5b 28366 #endif
fe2de317 28367@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28368 vcc->pop(vcc, skb);
28369 else
28370 dev_kfree_skb_any(skb);
28371- atomic_inc(&vcc->stats->tx_err);
28372+ atomic_inc_unchecked(&vcc->stats->tx_err);
28373 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28374 return -ENOMEM;
28375 }
fe2de317 28376@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28377 vcc->pop(vcc, skb);
28378 else
28379 dev_kfree_skb_any(skb);
28380- atomic_inc(&vcc->stats->tx_err);
28381+ atomic_inc_unchecked(&vcc->stats->tx_err);
28382 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28383 return -ENOMEM;
28384 }
fe2de317 28385@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28386 __enqueue_tpd(he_dev, tpd, cid);
28387 spin_unlock_irqrestore(&he_dev->global_lock, flags);
ae4e228f 28388
15a11c5b
MT
28389- atomic_inc(&vcc->stats->tx);
28390+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 28391
ae4e228f
MT
28392 return 0;
28393 }
fe2de317
MT
28394diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28395index b812103..e391a49 100644
28396--- a/drivers/atm/horizon.c
28397+++ b/drivers/atm/horizon.c
28398@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
15a11c5b
MT
28399 {
28400 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28401 // VC layer stats
28402- atomic_inc(&vcc->stats->rx);
28403+ atomic_inc_unchecked(&vcc->stats->rx);
28404 __net_timestamp(skb);
28405 // end of our responsibility
28406 vcc->push (vcc, skb);
fe2de317 28407@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
15a11c5b
MT
28408 dev->tx_iovec = NULL;
28409
28410 // VC layer stats
28411- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28412+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28413
28414 // free the skb
28415 hrz_kfree_skb (skb);
fe2de317 28416diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
4c928ab7 28417index 1c05212..c28e200 100644
fe2de317
MT
28418--- a/drivers/atm/idt77252.c
28419+++ b/drivers/atm/idt77252.c
28420@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
15a11c5b
MT
28421 else
28422 dev_kfree_skb(skb);
ae4e228f 28423
15a11c5b
MT
28424- atomic_inc(&vcc->stats->tx);
28425+ atomic_inc_unchecked(&vcc->stats->tx);
28426 }
ae4e228f 28427
15a11c5b 28428 atomic_dec(&scq->used);
fe2de317 28429@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28430 if ((sb = dev_alloc_skb(64)) == NULL) {
28431 printk("%s: Can't allocate buffers for aal0.\n",
28432 card->name);
28433- atomic_add(i, &vcc->stats->rx_drop);
28434+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28435 break;
28436 }
28437 if (!atm_charge(vcc, sb->truesize)) {
28438 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28439 card->name);
28440- atomic_add(i - 1, &vcc->stats->rx_drop);
28441+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28442 dev_kfree_skb(sb);
28443 break;
28444 }
fe2de317 28445@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28446 ATM_SKB(sb)->vcc = vcc;
28447 __net_timestamp(sb);
28448 vcc->push(vcc, sb);
28449- atomic_inc(&vcc->stats->rx);
28450+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28451
15a11c5b
MT
28452 cell += ATM_CELL_PAYLOAD;
28453 }
fe2de317 28454@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28455 "(CDC: %08x)\n",
28456 card->name, len, rpp->len, readl(SAR_REG_CDC));
28457 recycle_rx_pool_skb(card, rpp);
28458- atomic_inc(&vcc->stats->rx_err);
28459+ atomic_inc_unchecked(&vcc->stats->rx_err);
28460 return;
28461 }
28462 if (stat & SAR_RSQE_CRC) {
28463 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28464 recycle_rx_pool_skb(card, rpp);
28465- atomic_inc(&vcc->stats->rx_err);
28466+ atomic_inc_unchecked(&vcc->stats->rx_err);
28467 return;
28468 }
28469 if (skb_queue_len(&rpp->queue) > 1) {
fe2de317 28470@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28471 RXPRINTK("%s: Can't alloc RX skb.\n",
28472 card->name);
28473 recycle_rx_pool_skb(card, rpp);
28474- atomic_inc(&vcc->stats->rx_err);
28475+ atomic_inc_unchecked(&vcc->stats->rx_err);
28476 return;
28477 }
28478 if (!atm_charge(vcc, skb->truesize)) {
fe2de317 28479@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 28480 __net_timestamp(skb);
ae4e228f 28481
15a11c5b
MT
28482 vcc->push(vcc, skb);
28483- atomic_inc(&vcc->stats->rx);
28484+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28485
15a11c5b
MT
28486 return;
28487 }
fe2de317 28488@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 28489 __net_timestamp(skb);
ae4e228f 28490
15a11c5b
MT
28491 vcc->push(vcc, skb);
28492- atomic_inc(&vcc->stats->rx);
28493+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28494
15a11c5b
MT
28495 if (skb->truesize > SAR_FB_SIZE_3)
28496 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
fe2de317 28497@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
15a11c5b
MT
28498 if (vcc->qos.aal != ATM_AAL0) {
28499 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28500 card->name, vpi, vci);
28501- atomic_inc(&vcc->stats->rx_drop);
28502+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28503 goto drop;
28504 }
28505
28506 if ((sb = dev_alloc_skb(64)) == NULL) {
28507 printk("%s: Can't allocate buffers for AAL0.\n",
28508 card->name);
28509- atomic_inc(&vcc->stats->rx_err);
28510+ atomic_inc_unchecked(&vcc->stats->rx_err);
28511 goto drop;
28512 }
ae4e228f 28513
fe2de317 28514@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
15a11c5b
MT
28515 ATM_SKB(sb)->vcc = vcc;
28516 __net_timestamp(sb);
28517 vcc->push(vcc, sb);
28518- atomic_inc(&vcc->stats->rx);
28519+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28520
15a11c5b
MT
28521 drop:
28522 skb_pull(queue, 64);
fe2de317 28523@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
58c5fc13 28524
15a11c5b
MT
28525 if (vc == NULL) {
28526 printk("%s: NULL connection in send().\n", card->name);
58c5fc13
MT
28527- atomic_inc(&vcc->stats->tx_err);
28528+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28529 dev_kfree_skb(skb);
28530 return -EINVAL;
58c5fc13 28531 }
15a11c5b
MT
28532 if (!test_bit(VCF_TX, &vc->flags)) {
28533 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
58c5fc13
MT
28534- atomic_inc(&vcc->stats->tx_err);
28535+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28536 dev_kfree_skb(skb);
28537 return -EINVAL;
58c5fc13 28538 }
fe2de317 28539@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
15a11c5b
MT
28540 break;
28541 default:
28542 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
58c5fc13
MT
28543- atomic_inc(&vcc->stats->tx_err);
28544+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28545 dev_kfree_skb(skb);
28546 return -EINVAL;
58c5fc13 28547 }
15a11c5b
MT
28548
28549 if (skb_shinfo(skb)->nr_frags != 0) {
28550 printk("%s: No scatter-gather yet.\n", card->name);
28551- atomic_inc(&vcc->stats->tx_err);
28552+ atomic_inc_unchecked(&vcc->stats->tx_err);
28553 dev_kfree_skb(skb);
28554 return -EINVAL;
58c5fc13 28555 }
fe2de317 28556@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
15a11c5b
MT
28557
28558 err = queue_skb(card, vc, skb, oam);
28559 if (err) {
28560- atomic_inc(&vcc->stats->tx_err);
28561+ atomic_inc_unchecked(&vcc->stats->tx_err);
28562 dev_kfree_skb(skb);
28563 return err;
58c5fc13 28564 }
fe2de317 28565@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
15a11c5b
MT
28566 skb = dev_alloc_skb(64);
28567 if (!skb) {
28568 printk("%s: Out of memory in send_oam().\n", card->name);
28569- atomic_inc(&vcc->stats->tx_err);
28570+ atomic_inc_unchecked(&vcc->stats->tx_err);
28571 return -ENOMEM;
58c5fc13
MT
28572 }
28573 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
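All of the ATM driver hunks above, and most of those that follow for iphase, lanai, nicstar, solos-pci and zatm, apply one pattern: per-VCC statistics counters (rx, tx, rx_err, rx_drop, tx_err) move from the plain atomic helpers to the *_unchecked variants. grsecurity's PAX_REFCOUNT option instruments the ordinary atomic_t operations to detect signed overflow so reference-count overflows cannot be exploited; counters that merely tally events are allowed to wrap, so they are retyped as atomic_unchecked_t to avoid tripping that protection. The snippet below is an illustrative fallback only, assuming the type and helpers are defined elsewhere in this patch in the usual grsecurity way; it is not quoted from the patch.

    /* Illustrative fallback: with PAX_REFCOUNT disabled, the unchecked
     * type and helpers simply alias the ordinary atomic ones. */
    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;

    #define atomic_read_unchecked(v)          atomic_read(v)
    #define atomic_set_unchecked(v, i)        atomic_set((v), (i))
    #define atomic_inc_unchecked(v)           atomic_inc(v)
    #define atomic_add_unchecked(i, v)        atomic_add((i), (v))
    #define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
    #endif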
fe2de317 28574diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
5e856224 28575index 9e373ba..cf93727 100644
fe2de317
MT
28576--- a/drivers/atm/iphase.c
28577+++ b/drivers/atm/iphase.c
4c928ab7 28578@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
58c5fc13
MT
28579 status = (u_short) (buf_desc_ptr->desc_mode);
28580 if (status & (RX_CER | RX_PTE | RX_OFL))
28581 {
28582- atomic_inc(&vcc->stats->rx_err);
28583+ atomic_inc_unchecked(&vcc->stats->rx_err);
28584 IF_ERR(printk("IA: bad packet, dropping it");)
28585 if (status & RX_CER) {
28586 IF_ERR(printk(" cause: packet CRC error\n");)
4c928ab7 28587@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
58c5fc13
MT
28588 len = dma_addr - buf_addr;
28589 if (len > iadev->rx_buf_sz) {
28590 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28591- atomic_inc(&vcc->stats->rx_err);
28592+ atomic_inc_unchecked(&vcc->stats->rx_err);
28593 goto out_free_desc;
28594 }
28595
4c928ab7 28596@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28597 ia_vcc = INPH_IA_VCC(vcc);
28598 if (ia_vcc == NULL)
28599 {
28600- atomic_inc(&vcc->stats->rx_err);
28601+ atomic_inc_unchecked(&vcc->stats->rx_err);
5e856224 28602 atm_return(vcc, skb->truesize);
58c5fc13 28603 dev_kfree_skb_any(skb);
58c5fc13 28604 goto INCR_DLE;
4c928ab7 28605@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28606 if ((length > iadev->rx_buf_sz) || (length >
28607 (skb->len - sizeof(struct cpcs_trailer))))
28608 {
28609- atomic_inc(&vcc->stats->rx_err);
28610+ atomic_inc_unchecked(&vcc->stats->rx_err);
28611 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28612 length, skb->len);)
5e856224 28613 atm_return(vcc, skb->truesize);
4c928ab7 28614@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28615
28616 IF_RX(printk("rx_dle_intr: skb push");)
28617 vcc->push(vcc,skb);
28618- atomic_inc(&vcc->stats->rx);
28619+ atomic_inc_unchecked(&vcc->stats->rx);
28620 iadev->rx_pkt_cnt++;
28621 }
28622 INCR_DLE:
4c928ab7 28623@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
58c5fc13
MT
28624 {
28625 struct k_sonet_stats *stats;
28626 stats = &PRIV(_ia_dev[board])->sonet_stats;
28627- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28628- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28629- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28630- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28631- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28632- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28633- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28634- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28635- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28636+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28637+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28638+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28639+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28640+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28641+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28642+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28643+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28644+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28645 }
28646 ia_cmds.status = 0;
28647 break;
4c928ab7 28648@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
58c5fc13
MT
28649 if ((desc == 0) || (desc > iadev->num_tx_desc))
28650 {
28651 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28652- atomic_inc(&vcc->stats->tx);
28653+ atomic_inc_unchecked(&vcc->stats->tx);
28654 if (vcc->pop)
28655 vcc->pop(vcc, skb);
28656 else
4c928ab7 28657@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
58c5fc13
MT
28658 ATM_DESC(skb) = vcc->vci;
28659 skb_queue_tail(&iadev->tx_dma_q, skb);
28660
28661- atomic_inc(&vcc->stats->tx);
28662+ atomic_inc_unchecked(&vcc->stats->tx);
28663 iadev->tx_pkt_cnt++;
28664 /* Increment transaction counter */
28665 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28666
28667 #if 0
28668 /* add flow control logic */
28669- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28670+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28671 if (iavcc->vc_desc_cnt > 10) {
28672 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28673 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
fe2de317 28674diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
4c928ab7 28675index f556969..0da15eb 100644
fe2de317
MT
28676--- a/drivers/atm/lanai.c
28677+++ b/drivers/atm/lanai.c
28678@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
58c5fc13
MT
28679 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28680 lanai_endtx(lanai, lvcc);
28681 lanai_free_skb(lvcc->tx.atmvcc, skb);
28682- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28683+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28684 }
28685
28686 /* Try to fill the buffer - don't call unless there is backlog */
fe2de317 28687@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
58c5fc13
MT
28688 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28689 __net_timestamp(skb);
28690 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28691- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28692+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28693 out:
28694 lvcc->rx.buf.ptr = end;
28695 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
4c928ab7 28696@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28697 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28698 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28699 lanai->stats.service_rxnotaal5++;
28700- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28701+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28702 return 0;
28703 }
28704 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
4c928ab7 28705@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28706 int bytes;
28707 read_unlock(&vcc_sklist_lock);
28708 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28709- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28710+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28711 lvcc->stats.x.aal5.service_trash++;
28712 bytes = (SERVICE_GET_END(s) * 16) -
28713 (((unsigned long) lvcc->rx.buf.ptr) -
4c928ab7 28714@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28715 }
28716 if (s & SERVICE_STREAM) {
28717 read_unlock(&vcc_sklist_lock);
28718- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28719+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28720 lvcc->stats.x.aal5.service_stream++;
28721 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28722 "PDU on VCI %d!\n", lanai->number, vci);
4c928ab7 28723@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28724 return 0;
28725 }
28726 DPRINTK("got rx crc error on vci %d\n", vci);
28727- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28728+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28729 lvcc->stats.x.aal5.service_rxcrc++;
28730 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28731 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
fe2de317
MT
28732diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28733index 1c70c45..300718d 100644
28734--- a/drivers/atm/nicstar.c
28735+++ b/drivers/atm/nicstar.c
28736@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28737 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28738 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28739 card->index);
28740- atomic_inc(&vcc->stats->tx_err);
28741+ atomic_inc_unchecked(&vcc->stats->tx_err);
28742 dev_kfree_skb_any(skb);
28743 return -EINVAL;
28744 }
fe2de317 28745@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28746 if (!vc->tx) {
28747 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28748 card->index);
28749- atomic_inc(&vcc->stats->tx_err);
28750+ atomic_inc_unchecked(&vcc->stats->tx_err);
28751 dev_kfree_skb_any(skb);
28752 return -EINVAL;
28753 }
fe2de317 28754@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28755 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28756 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28757 card->index);
28758- atomic_inc(&vcc->stats->tx_err);
28759+ atomic_inc_unchecked(&vcc->stats->tx_err);
28760 dev_kfree_skb_any(skb);
28761 return -EINVAL;
28762 }
58c5fc13 28763
6892158b
MT
28764 if (skb_shinfo(skb)->nr_frags != 0) {
28765 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28766- atomic_inc(&vcc->stats->tx_err);
28767+ atomic_inc_unchecked(&vcc->stats->tx_err);
28768 dev_kfree_skb_any(skb);
28769 return -EINVAL;
28770 }
fe2de317 28771@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28772 }
28773
28774 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28775- atomic_inc(&vcc->stats->tx_err);
28776+ atomic_inc_unchecked(&vcc->stats->tx_err);
28777 dev_kfree_skb_any(skb);
28778 return -EIO;
28779 }
28780- atomic_inc(&vcc->stats->tx);
28781+ atomic_inc_unchecked(&vcc->stats->tx);
28782
28783 return 0;
28784 }
fe2de317 28785@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28786 printk
28787 ("nicstar%d: Can't allocate buffers for aal0.\n",
28788 card->index);
28789- atomic_add(i, &vcc->stats->rx_drop);
28790+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28791 break;
28792 }
28793 if (!atm_charge(vcc, sb->truesize)) {
28794 RXPRINTK
28795 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28796 card->index);
28797- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28798+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28799 dev_kfree_skb_any(sb);
28800 break;
28801 }
fe2de317 28802@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28803 ATM_SKB(sb)->vcc = vcc;
28804 __net_timestamp(sb);
28805 vcc->push(vcc, sb);
28806- atomic_inc(&vcc->stats->rx);
28807+ atomic_inc_unchecked(&vcc->stats->rx);
28808 cell += ATM_CELL_PAYLOAD;
28809 }
28810
fe2de317 28811@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28812 if (iovb == NULL) {
28813 printk("nicstar%d: Out of iovec buffers.\n",
28814 card->index);
28815- atomic_inc(&vcc->stats->rx_drop);
28816+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28817 recycle_rx_buf(card, skb);
28818 return;
28819 }
fe2de317 28820@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28821 small or large buffer itself. */
28822 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28823 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28824- atomic_inc(&vcc->stats->rx_err);
28825+ atomic_inc_unchecked(&vcc->stats->rx_err);
28826 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28827 NS_MAX_IOVECS);
28828 NS_PRV_IOVCNT(iovb) = 0;
fe2de317 28829@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28830 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28831 card->index);
28832 which_list(card, skb);
28833- atomic_inc(&vcc->stats->rx_err);
28834+ atomic_inc_unchecked(&vcc->stats->rx_err);
28835 recycle_rx_buf(card, skb);
28836 vc->rx_iov = NULL;
28837 recycle_iov_buf(card, iovb);
fe2de317 28838@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28839 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28840 card->index);
28841 which_list(card, skb);
28842- atomic_inc(&vcc->stats->rx_err);
28843+ atomic_inc_unchecked(&vcc->stats->rx_err);
28844 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28845 NS_PRV_IOVCNT(iovb));
28846 vc->rx_iov = NULL;
fe2de317 28847@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28848 printk(" - PDU size mismatch.\n");
28849 else
28850 printk(".\n");
28851- atomic_inc(&vcc->stats->rx_err);
28852+ atomic_inc_unchecked(&vcc->stats->rx_err);
28853 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28854 NS_PRV_IOVCNT(iovb));
28855 vc->rx_iov = NULL;
fe2de317 28856@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28857 /* skb points to a small buffer */
28858 if (!atm_charge(vcc, skb->truesize)) {
28859 push_rxbufs(card, skb);
28860- atomic_inc(&vcc->stats->rx_drop);
28861+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28862 } else {
28863 skb_put(skb, len);
28864 dequeue_sm_buf(card, skb);
fe2de317 28865@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28866 ATM_SKB(skb)->vcc = vcc;
28867 __net_timestamp(skb);
28868 vcc->push(vcc, skb);
28869- atomic_inc(&vcc->stats->rx);
28870+ atomic_inc_unchecked(&vcc->stats->rx);
28871 }
28872 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28873 struct sk_buff *sb;
fe2de317 28874@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28875 if (len <= NS_SMBUFSIZE) {
28876 if (!atm_charge(vcc, sb->truesize)) {
28877 push_rxbufs(card, sb);
28878- atomic_inc(&vcc->stats->rx_drop);
28879+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28880 } else {
28881 skb_put(sb, len);
28882 dequeue_sm_buf(card, sb);
fe2de317 28883@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28884 ATM_SKB(sb)->vcc = vcc;
28885 __net_timestamp(sb);
28886 vcc->push(vcc, sb);
28887- atomic_inc(&vcc->stats->rx);
28888+ atomic_inc_unchecked(&vcc->stats->rx);
28889 }
28890
28891 push_rxbufs(card, skb);
fe2de317 28892@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28893
28894 if (!atm_charge(vcc, skb->truesize)) {
28895 push_rxbufs(card, skb);
28896- atomic_inc(&vcc->stats->rx_drop);
28897+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28898 } else {
28899 dequeue_lg_buf(card, skb);
28900 #ifdef NS_USE_DESTRUCTORS
fe2de317 28901@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28902 ATM_SKB(skb)->vcc = vcc;
28903 __net_timestamp(skb);
28904 vcc->push(vcc, skb);
28905- atomic_inc(&vcc->stats->rx);
28906+ atomic_inc_unchecked(&vcc->stats->rx);
28907 }
28908
28909 push_rxbufs(card, sb);
fe2de317 28910@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28911 printk
28912 ("nicstar%d: Out of huge buffers.\n",
28913 card->index);
28914- atomic_inc(&vcc->stats->rx_drop);
28915+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28916 recycle_iovec_rx_bufs(card,
28917 (struct iovec *)
28918 iovb->data,
fe2de317 28919@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28920 card->hbpool.count++;
28921 } else
28922 dev_kfree_skb_any(hb);
28923- atomic_inc(&vcc->stats->rx_drop);
28924+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28925 } else {
28926 /* Copy the small buffer to the huge buffer */
28927 sb = (struct sk_buff *)iov->iov_base;
fe2de317 28928@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
58c5fc13 28929 #endif /* NS_USE_DESTRUCTORS */
6892158b
MT
28930 __net_timestamp(hb);
28931 vcc->push(vcc, hb);
28932- atomic_inc(&vcc->stats->rx);
28933+ atomic_inc_unchecked(&vcc->stats->rx);
28934 }
28935 }
58c5fc13 28936
fe2de317 28937diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
5e856224 28938index e8cd652..bbbd1fc 100644
fe2de317
MT
28939--- a/drivers/atm/solos-pci.c
28940+++ b/drivers/atm/solos-pci.c
15a11c5b 28941@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
58c5fc13
MT
28942 }
28943 atm_charge(vcc, skb->truesize);
28944 vcc->push(vcc, skb);
28945- atomic_inc(&vcc->stats->rx);
28946+ atomic_inc_unchecked(&vcc->stats->rx);
28947 break;
28948
28949 case PKT_STATUS:
4c928ab7 28950@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
58c5fc13
MT
28951 vcc = SKB_CB(oldskb)->vcc;
28952
28953 if (vcc) {
28954- atomic_inc(&vcc->stats->tx);
28955+ atomic_inc_unchecked(&vcc->stats->tx);
28956 solos_pop(vcc, oldskb);
28957 } else
28958 dev_kfree_skb_irq(oldskb);
fe2de317
MT
28959diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28960index 90f1ccc..04c4a1e 100644
28961--- a/drivers/atm/suni.c
28962+++ b/drivers/atm/suni.c
df50ba0c 28963@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
58c5fc13
MT
28964
28965
28966 #define ADD_LIMITED(s,v) \
28967- atomic_add((v),&stats->s); \
28968- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28969+ atomic_add_unchecked((v),&stats->s); \
28970+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28971
28972
28973 static void suni_hz(unsigned long from_timer)
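ADD_LIMITED is a saturating statistics update: add the new sample, and if the signed counter has wrapped negative, pin it at INT_MAX. That deliberate reliance on wraparound is exactly what PAX_REFCOUNT would flag, which is why this macro, and its twin in uPD98402.c below, switches to the unchecked helpers. A hypothetical stand-alone equivalent (the function name is invented, only the logic mirrors the macro):

    /* Hypothetical helper mirroring ADD_LIMITED: add v, then saturate at
     * INT_MAX once the signed counter wraps negative. */
    static inline void sonet_stat_add_limited(atomic_unchecked_t *ctr, int v)
    {
            atomic_add_unchecked(v, ctr);
            if (atomic_read_unchecked(ctr) < 0)
                    atomic_set_unchecked(ctr, INT_MAX);
    }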
fe2de317
MT
28974diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28975index 5120a96..e2572bd 100644
28976--- a/drivers/atm/uPD98402.c
28977+++ b/drivers/atm/uPD98402.c
28978@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
58c5fc13
MT
28979 struct sonet_stats tmp;
28980 int error = 0;
28981
28982- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28983+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28984 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28985 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28986 if (zero && !error) {
fe2de317 28987@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
58c5fc13
MT
28988
28989
28990 #define ADD_LIMITED(s,v) \
28991- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28992- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28993- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28994+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28995+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28996+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28997
28998
28999 static void stat_event(struct atm_dev *dev)
fe2de317 29000@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
58c5fc13
MT
29001 if (reason & uPD98402_INT_PFM) stat_event(dev);
29002 if (reason & uPD98402_INT_PCO) {
29003 (void) GET(PCOCR); /* clear interrupt cause */
29004- atomic_add(GET(HECCT),
29005+ atomic_add_unchecked(GET(HECCT),
29006 &PRIV(dev)->sonet_stats.uncorr_hcs);
29007 }
29008 if ((reason & uPD98402_INT_RFO) &&
fe2de317 29009@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
58c5fc13
MT
29010 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
29011 uPD98402_INT_LOS),PIMR); /* enable them */
29012 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
29013- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
29014- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
29015- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
29016+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
29017+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
29018+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
29019 return 0;
29020 }
29021
fe2de317
MT
29022diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
29023index d889f56..17eb71e 100644
29024--- a/drivers/atm/zatm.c
29025+++ b/drivers/atm/zatm.c
29026@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
58c5fc13
MT
29027 }
29028 if (!size) {
29029 dev_kfree_skb_irq(skb);
29030- if (vcc) atomic_inc(&vcc->stats->rx_err);
29031+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
29032 continue;
29033 }
29034 if (!atm_charge(vcc,skb->truesize)) {
fe2de317 29035@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
58c5fc13
MT
29036 skb->len = size;
29037 ATM_SKB(skb)->vcc = vcc;
29038 vcc->push(vcc,skb);
29039- atomic_inc(&vcc->stats->rx);
29040+ atomic_inc_unchecked(&vcc->stats->rx);
29041 }
29042 zout(pos & 0xffff,MTA(mbx));
29043 #if 0 /* probably a stupid idea */
fe2de317 29044@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
58c5fc13
MT
29045 skb_queue_head(&zatm_vcc->backlog,skb);
29046 break;
29047 }
29048- atomic_inc(&vcc->stats->tx);
29049+ atomic_inc_unchecked(&vcc->stats->tx);
29050 wake_up(&zatm_vcc->tx_wait);
29051 }
29052
fe2de317 29053diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
5e856224 29054index 8493536..31adee0 100644
fe2de317
MT
29055--- a/drivers/base/devtmpfs.c
29056+++ b/drivers/base/devtmpfs.c
6e9df6a3
MT
29057@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
29058 if (!thread)
29059 return 0;
29060
29061- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
29062+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
29063 if (err)
29064 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
29065 else
fe2de317 29066diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
4c928ab7 29067index caf995f..6f76697 100644
fe2de317
MT
29068--- a/drivers/base/power/wakeup.c
29069+++ b/drivers/base/power/wakeup.c
4c928ab7 29070@@ -30,14 +30,14 @@ bool events_check_enabled;
66a7e928
MT
29071 * They need to be modified together atomically, so it's better to use one
29072 * atomic variable to hold them both.
29073 */
29074-static atomic_t combined_event_count = ATOMIC_INIT(0);
29075+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
8308f9c9 29076
66a7e928
MT
29077 #define IN_PROGRESS_BITS (sizeof(int) * 4)
29078 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
29079
29080 static void split_counters(unsigned int *cnt, unsigned int *inpr)
29081 {
29082- unsigned int comb = atomic_read(&combined_event_count);
29083+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
29084
29085 *cnt = (comb >> IN_PROGRESS_BITS);
29086 *inpr = comb & MAX_IN_PROGRESS;
4c928ab7 29087@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
66a7e928
MT
29088 ws->last_time = ktime_get();
29089
29090 /* Increment the counter of events in progress. */
29091- atomic_inc(&combined_event_count);
29092+ atomic_inc_unchecked(&combined_event_count);
8308f9c9
MT
29093 }
29094
66a7e928 29095 /**
4c928ab7 29096@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
66a7e928
MT
29097 * Increment the counter of registered wakeup events and decrement the
29098 * counter of wakeup events in progress simultaneously.
29099 */
29100- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
29101+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
29102 }
8308f9c9 29103
66a7e928 29104 /**
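combined_event_count packs two values into one word: the low IN_PROGRESS_BITS bits hold the number of wakeup events currently in progress and the high bits hold the count of registered events, so both can be updated with a single atomic operation. Activation adds 1; deactivation adds MAX_IN_PROGRESS, which is the same as incrementing the high field and decrementing the low one. Because the packed fields are expected to wrap within the word, the counter becomes atomic_unchecked_t here. A stand-alone worked example of the packing arithmetic, reusing the constants from the hunk above (assuming a 32-bit int, so IN_PROGRESS_BITS is 16; user-space program for illustration only):

    #include <stdio.h>

    #define IN_PROGRESS_BITS  (sizeof(int) * 4)
    #define MAX_IN_PROGRESS   ((1 << IN_PROGRESS_BITS) - 1)

    int main(void)
    {
            unsigned int comb = 0;

            comb += 1;                  /* activate: one event now in progress */
            comb += MAX_IN_PROGRESS;    /* deactivate: registered++, in-progress-- */

            printf("registered=%u in_progress=%u\n",
                   (unsigned int)(comb >> IN_PROGRESS_BITS),
                   (unsigned int)(comb & MAX_IN_PROGRESS));
            /* prints: registered=1 in_progress=0 */
            return 0;
    }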
fe2de317 29105diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
4c928ab7 29106index b0f553b..77b928b 100644
fe2de317
MT
29107--- a/drivers/block/cciss.c
29108+++ b/drivers/block/cciss.c
4c928ab7 29109@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
bc901d79
MT
29110 int err;
29111 u32 cp;
29112
29113+ memset(&arg64, 0, sizeof(arg64));
29114+
29115 err = 0;
29116 err |=
29117 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4c928ab7 29118@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
15a11c5b
MT
29119 while (!list_empty(&h->reqQ)) {
29120 c = list_entry(h->reqQ.next, CommandList_struct, list);
29121 /* can't do anything if fifo is full */
29122- if ((h->access.fifo_full(h))) {
29123+ if ((h->access->fifo_full(h))) {
29124 dev_warn(&h->pdev->dev, "fifo full\n");
29125 break;
29126 }
4c928ab7 29127@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
15a11c5b 29128 h->Qdepth--;
66a7e928 29129
15a11c5b
MT
29130 /* Tell the controller execute command */
29131- h->access.submit_command(h, c);
29132+ h->access->submit_command(h, c);
66a7e928 29133
15a11c5b
MT
29134 /* Put job onto the completed Q */
29135 addQ(&h->cmpQ, c);
4c928ab7 29136@@ -3443,17 +3445,17 @@ startio:
66a7e928 29137
15a11c5b
MT
29138 static inline unsigned long get_next_completion(ctlr_info_t *h)
29139 {
29140- return h->access.command_completed(h);
29141+ return h->access->command_completed(h);
29142 }
29143
29144 static inline int interrupt_pending(ctlr_info_t *h)
29145 {
29146- return h->access.intr_pending(h);
29147+ return h->access->intr_pending(h);
29148 }
29149
29150 static inline long interrupt_not_for_us(ctlr_info_t *h)
29151 {
29152- return ((h->access.intr_pending(h) == 0) ||
29153+ return ((h->access->intr_pending(h) == 0) ||
29154 (h->interrupts_enabled == 0));
29155 }
29156
4c928ab7 29157@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
15a11c5b
MT
29158 u32 a;
29159
29160 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
29161- return h->access.command_completed(h);
29162+ return h->access->command_completed(h);
29163
29164 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
29165 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
4c928ab7 29166@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
15a11c5b
MT
29167 trans_support & CFGTBL_Trans_use_short_tags);
29168
29169 /* Change the access methods to the performant access methods */
29170- h->access = SA5_performant_access;
29171+ h->access = &SA5_performant_access;
29172 h->transMethod = CFGTBL_Trans_Performant;
29173
29174 return;
4c928ab7 29175@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
15a11c5b
MT
29176 if (prod_index < 0)
29177 return -ENODEV;
29178 h->product_name = products[prod_index].product_name;
29179- h->access = *(products[prod_index].access);
29180+ h->access = products[prod_index].access;
29181
29182 if (cciss_board_disabled(h)) {
29183 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
4c928ab7 29184@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
15a11c5b
MT
29185 }
29186
29187 /* make sure the board interrupts are off */
29188- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29189+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29190 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
29191 if (rc)
29192 goto clean2;
4c928ab7 29193@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
15a11c5b
MT
29194 * fake ones to scoop up any residual completions.
29195 */
29196 spin_lock_irqsave(&h->lock, flags);
29197- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29198+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29199 spin_unlock_irqrestore(&h->lock, flags);
4c928ab7 29200 free_irq(h->intr[h->intr_mode], h);
15a11c5b 29201 rc = cciss_request_irq(h, cciss_msix_discard_completions,
4c928ab7 29202@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
15a11c5b
MT
29203 dev_info(&h->pdev->dev, "Board READY.\n");
29204 dev_info(&h->pdev->dev,
29205 "Waiting for stale completions to drain.\n");
29206- h->access.set_intr_mask(h, CCISS_INTR_ON);
29207+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29208 msleep(10000);
29209- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29210+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29211
29212 rc = controller_reset_failed(h->cfgtable);
29213 if (rc)
4c928ab7 29214@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
15a11c5b
MT
29215 cciss_scsi_setup(h);
29216
29217 /* Turn the interrupts on so we can service requests */
29218- h->access.set_intr_mask(h, CCISS_INTR_ON);
29219+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29220
29221 /* Get the firmware version */
29222 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
4c928ab7 29223@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
15a11c5b
MT
29224 kfree(flush_buf);
29225 if (return_code != IO_OK)
29226 dev_warn(&h->pdev->dev, "Error flushing cache\n");
29227- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29228+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
4c928ab7 29229 free_irq(h->intr[h->intr_mode], h);
15a11c5b
MT
29230 }
29231
fe2de317 29232diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
4c928ab7 29233index 7fda30e..eb5dfe0 100644
fe2de317
MT
29234--- a/drivers/block/cciss.h
29235+++ b/drivers/block/cciss.h
4c928ab7 29236@@ -101,7 +101,7 @@ struct ctlr_info
15a11c5b
MT
29237 /* information about each logical volume */
29238 drive_info_struct *drv[CISS_MAX_LUN];
29239
29240- struct access_method access;
29241+ struct access_method *access;
29242
29243 /* queue and queue Info */
29244 struct list_head reqQ;
fe2de317 29245diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
4c928ab7 29246index 9125bbe..eede5c8 100644
fe2de317
MT
29247--- a/drivers/block/cpqarray.c
29248+++ b/drivers/block/cpqarray.c
29249@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
15a11c5b
MT
29250 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29251 goto Enomem4;
29252 }
29253- hba[i]->access.set_intr_mask(hba[i], 0);
29254+ hba[i]->access->set_intr_mask(hba[i], 0);
29255 if (request_irq(hba[i]->intr, do_ida_intr,
29256 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29257 {
fe2de317 29258@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
15a11c5b
MT
29259 add_timer(&hba[i]->timer);
29260
29261 /* Enable IRQ now that spinlock and rate limit timer are set up */
29262- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29263+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29264
29265 for(j=0; j<NWD; j++) {
29266 struct gendisk *disk = ida_gendisk[i][j];
29267@@ -694,7 +694,7 @@ DBGINFO(
29268 for(i=0; i<NR_PRODUCTS; i++) {
29269 if (board_id == products[i].board_id) {
29270 c->product_name = products[i].product_name;
29271- c->access = *(products[i].access);
29272+ c->access = products[i].access;
29273 break;
29274 }
29275 }
fe2de317 29276@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
15a11c5b
MT
29277 hba[ctlr]->intr = intr;
29278 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29279 hba[ctlr]->product_name = products[j].product_name;
29280- hba[ctlr]->access = *(products[j].access);
29281+ hba[ctlr]->access = products[j].access;
29282 hba[ctlr]->ctlr = ctlr;
29283 hba[ctlr]->board_id = board_id;
29284 hba[ctlr]->pci_dev = NULL; /* not PCI */
4c928ab7 29285@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
15a11c5b
MT
29286
29287 while((c = h->reqQ) != NULL) {
29288 /* Can't do anything if we're busy */
29289- if (h->access.fifo_full(h) == 0)
29290+ if (h->access->fifo_full(h) == 0)
29291 return;
66a7e928 29292
15a11c5b 29293 /* Get the first entry from the request Q */
4c928ab7 29294@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
15a11c5b
MT
29295 h->Qdepth--;
29296
29297 /* Tell the controller to do our bidding */
29298- h->access.submit_command(h, c);
29299+ h->access->submit_command(h, c);
29300
29301 /* Get onto the completion Q */
29302 addQ(&h->cmpQ, c);
4c928ab7 29303@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
15a11c5b
MT
29304 unsigned long flags;
29305 __u32 a,a1;
29306
29307- istat = h->access.intr_pending(h);
29308+ istat = h->access->intr_pending(h);
29309 /* Is this interrupt for us? */
29310 if (istat == 0)
29311 return IRQ_NONE;
4c928ab7 29312@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
15a11c5b
MT
29313 */
29314 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29315 if (istat & FIFO_NOT_EMPTY) {
29316- while((a = h->access.command_completed(h))) {
29317+ while((a = h->access->command_completed(h))) {
29318 a1 = a; a &= ~3;
29319 if ((c = h->cmpQ) == NULL)
29320 {
4c928ab7 29321@@ -1449,11 +1449,11 @@ static int sendcmd(
15a11c5b
MT
29322 /*
29323 * Disable interrupt
29324 */
29325- info_p->access.set_intr_mask(info_p, 0);
29326+ info_p->access->set_intr_mask(info_p, 0);
29327 /* Make sure there is room in the command FIFO */
29328 /* Actually it should be completely empty at this time. */
29329 for (i = 200000; i > 0; i--) {
29330- temp = info_p->access.fifo_full(info_p);
29331+ temp = info_p->access->fifo_full(info_p);
29332 if (temp != 0) {
29333 break;
29334 }
4c928ab7 29335@@ -1466,7 +1466,7 @@ DBG(
15a11c5b
MT
29336 /*
29337 * Send the cmd
29338 */
29339- info_p->access.submit_command(info_p, c);
29340+ info_p->access->submit_command(info_p, c);
29341 complete = pollcomplete(ctlr);
29342
29343 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
4c928ab7 29344@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
15a11c5b
MT
29345 * we check the new geometry. Then turn interrupts back on when
29346 * we're done.
29347 */
29348- host->access.set_intr_mask(host, 0);
29349+ host->access->set_intr_mask(host, 0);
29350 getgeometry(ctlr);
29351- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29352+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29353
29354 for(i=0; i<NWD; i++) {
29355 struct gendisk *disk = ida_gendisk[ctlr][i];
4c928ab7 29356@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
15a11c5b
MT
29357 /* Wait (up to 2 seconds) for a command to complete */
29358
29359 for (i = 200000; i > 0; i--) {
29360- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29361+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29362 if (done == 0) {
29363 udelay(10); /* a short fixed delay */
29364 } else
fe2de317
MT
29365diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29366index be73e9d..7fbf140 100644
29367--- a/drivers/block/cpqarray.h
29368+++ b/drivers/block/cpqarray.h
15a11c5b
MT
29369@@ -99,7 +99,7 @@ struct ctlr_info {
29370 drv_info_t drv[NWD];
29371 struct proc_dir_entry *proc;
29372
29373- struct access_method access;
29374+ struct access_method *access;
29375
29376 cmdlist_t *reqQ;
29377 cmdlist_t *cmpQ;
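In both cciss and cpqarray the controller structure used to embed a copy of the board's access_method table (a struct consisting only of function pointers), and every call site went through h->access.fn(...). The patch turns the member into a pointer and rewrites the call sites to h->access->fn(...). The likely motivation is grsecurity's constification of function-pointer-only structures: once struct access_method instances are treated as read-only, they can no longer be copied member-wise into each writable controller object, so the controller keeps a pointer to the shared const table instead. A minimal sketch of the shape of the change, with invented names rather than the real cciss definitions:

    /* Hypothetical illustration; struct and field names are made up. */
    struct board_ops {                      /* function pointers only */
            void (*submit)(void *hw);
            int  (*pending)(void *hw);
    };

    static const struct board_ops sa5_like_ops = {
            .submit  = NULL,                /* real drivers wire these to MMIO accessors */
            .pending = NULL,
    };

    struct controller {
            const struct board_ops *access; /* was: struct board_ops access; (a writable copy) */
    };

Call sites change accordingly, which is what the bulk of the cciss and cpqarray hunks above do: h->access.submit_command(h, c) becomes h->access->submit_command(h, c).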
fe2de317 29378diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
5e856224 29379index 8d68056..e67050f 100644
fe2de317
MT
29380--- a/drivers/block/drbd/drbd_int.h
29381+++ b/drivers/block/drbd/drbd_int.h
4c928ab7 29382@@ -736,7 +736,7 @@ struct drbd_request;
8308f9c9
MT
29383 struct drbd_epoch {
29384 struct list_head list;
29385 unsigned int barrier_nr;
29386- atomic_t epoch_size; /* increased on every request added. */
29387+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29388 atomic_t active; /* increased on every req. added, and dec on every finished. */
29389 unsigned long flags;
29390 };
4c928ab7 29391@@ -1108,7 +1108,7 @@ struct drbd_conf {
8308f9c9
MT
29392 void *int_dig_in;
29393 void *int_dig_vv;
29394 wait_queue_head_t seq_wait;
29395- atomic_t packet_seq;
29396+ atomic_unchecked_t packet_seq;
29397 unsigned int peer_seq;
29398 spinlock_t peer_seq_lock;
29399 unsigned int minor;
4c928ab7 29400@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
6e9df6a3
MT
29401
29402 static inline void drbd_tcp_cork(struct socket *sock)
29403 {
29404- int __user val = 1;
29405+ int val = 1;
29406 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29407- (char __user *)&val, sizeof(val));
29408+ (char __force_user *)&val, sizeof(val));
29409 }
29410
29411 static inline void drbd_tcp_uncork(struct socket *sock)
29412 {
29413- int __user val = 0;
29414+ int val = 0;
29415 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29416- (char __user *)&val, sizeof(val));
29417+ (char __force_user *)&val, sizeof(val));
29418 }
29419
29420 static inline void drbd_tcp_nodelay(struct socket *sock)
29421 {
29422- int __user val = 1;
29423+ int val = 1;
29424 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29425- (char __user *)&val, sizeof(val));
29426+ (char __force_user *)&val, sizeof(val));
29427 }
29428
29429 static inline void drbd_tcp_quickack(struct socket *sock)
29430 {
29431- int __user val = 2;
29432+ int val = 2;
29433 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29434- (char __user *)&val, sizeof(val));
29435+ (char __force_user *)&val, sizeof(val));
29436 }
29437
29438 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
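The drbd_tcp_* helpers pass the address of a local kernel variable to drbd_setsockopt(), whose optval parameter carries a __user annotation because it mirrors the setsockopt() ABI. The original code hid that mismatch by declaring the local as int __user val, a bogus annotation since the variable lives on the kernel stack. The patch fixes the declaration and makes the intentional kernel-to-__user conversion explicit with a __force_user cast, presumably introduced elsewhere in this patch as shorthand for the sparse attributes __force __user. The definition below is an assumption and the wrapper name is invented; only the call shape mirrors the hunk. The same treatment appears in devtmpfs_mount() earlier and in loop's __do_lo_send_write() later in this patch.

    /* Assumed definition (from the patch's compiler.h changes, not quoted). */
    #define __force_user    __force __user

    static inline void example_tcp_cork(struct socket *sock)
    {
            int val = 1;    /* plain kernel stack variable, no bogus __user */

            (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
                                   (char __force_user *)&val, sizeof(val));
    }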
fe2de317 29439diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
5e856224 29440index 211fc44..c5116f1 100644
fe2de317
MT
29441--- a/drivers/block/drbd/drbd_main.c
29442+++ b/drivers/block/drbd/drbd_main.c
29443@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
8308f9c9
MT
29444 p.sector = sector;
29445 p.block_id = block_id;
29446 p.blksize = blksize;
29447- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29448+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29449
29450 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
66a7e928 29451 return false;
fe2de317 29452@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
8308f9c9
MT
29453 p.sector = cpu_to_be64(req->sector);
29454 p.block_id = (unsigned long)req;
29455 p.seq_num = cpu_to_be32(req->seq_num =
29456- atomic_add_return(1, &mdev->packet_seq));
29457+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29458
29459 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29460
fe2de317 29461@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
8308f9c9
MT
29462 atomic_set(&mdev->unacked_cnt, 0);
29463 atomic_set(&mdev->local_cnt, 0);
29464 atomic_set(&mdev->net_cnt, 0);
29465- atomic_set(&mdev->packet_seq, 0);
29466+ atomic_set_unchecked(&mdev->packet_seq, 0);
29467 atomic_set(&mdev->pp_in_use, 0);
29468 atomic_set(&mdev->pp_in_use_by_net, 0);
29469 atomic_set(&mdev->rs_sect_in, 0);
fe2de317 29470@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
8308f9c9
MT
29471 mdev->receiver.t_state);
29472
29473 /* no need to lock it, I'm the only thread alive */
29474- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29475- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29476+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29477+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29478 mdev->al_writ_cnt =
29479 mdev->bm_writ_cnt =
29480 mdev->read_cnt =
fe2de317 29481diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
5e856224 29482index af2a250..0fdeb75 100644
fe2de317
MT
29483--- a/drivers/block/drbd/drbd_nl.c
29484+++ b/drivers/block/drbd/drbd_nl.c
5e856224
MT
29485@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29486 return;
29487 }
29488
29489- if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
29490+ if (!capable(CAP_SYS_ADMIN)) {
29491 retcode = ERR_PERM;
29492 goto fail;
29493 }
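This hunk replaces a raw capability-bit test with a full capable() check for the drbd connector management interface. cap_raised(current_cap(), CAP_SYS_ADMIN) only inspects the task's effective capability mask, whereas capable(CAP_SYS_ADMIN) goes through security_capable(), so LSMs, including grsecurity's RBAC, can see, audit and veto the privileged operation. A side-by-side illustration of the two forms (helper names invented, not from the patch):

    /* before: raw bitmask test, no LSM hook, no audit */
    static int check_admin_old(void)
    {
            return cap_raised(current_cap(), CAP_SYS_ADMIN) ? 0 : -EPERM;
    }

    /* after: capable() routes the decision through security_capable() */
    static int check_admin_new(void)
    {
            return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
    }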
fe2de317 29494@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
8308f9c9
MT
29495 module_put(THIS_MODULE);
29496 }
29497
29498-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29499+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29500
29501 static unsigned short *
29502 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
fe2de317 29503@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
8308f9c9
MT
29504 cn_reply->id.idx = CN_IDX_DRBD;
29505 cn_reply->id.val = CN_VAL_DRBD;
29506
29507- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29508+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29509 cn_reply->ack = 0; /* not used here. */
29510 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29511 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 29512@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
8308f9c9
MT
29513 cn_reply->id.idx = CN_IDX_DRBD;
29514 cn_reply->id.val = CN_VAL_DRBD;
29515
29516- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29517+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29518 cn_reply->ack = 0; /* not used here. */
29519 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29520 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 29521@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
8308f9c9
MT
29522 cn_reply->id.idx = CN_IDX_DRBD;
29523 cn_reply->id.val = CN_VAL_DRBD;
29524
29525- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29526+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29527 cn_reply->ack = 0; // not used here.
29528 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29529 (int)((char*)tl - (char*)reply->tag_list);
fe2de317 29530@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
8308f9c9
MT
29531 cn_reply->id.idx = CN_IDX_DRBD;
29532 cn_reply->id.val = CN_VAL_DRBD;
29533
29534- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29535+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29536 cn_reply->ack = 0; /* not used here. */
29537 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29538 (int)((char *)tl - (char *)reply->tag_list);
fe2de317
MT
29539diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29540index 43beaca..4a5b1dd 100644
29541--- a/drivers/block/drbd/drbd_receiver.c
29542+++ b/drivers/block/drbd/drbd_receiver.c
66a7e928 29543@@ -894,7 +894,7 @@ retry:
8308f9c9
MT
29544 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29545 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29546
29547- atomic_set(&mdev->packet_seq, 0);
29548+ atomic_set_unchecked(&mdev->packet_seq, 0);
29549 mdev->peer_seq = 0;
29550
29551 drbd_thread_start(&mdev->asender);
fe2de317 29552@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
8308f9c9
MT
29553 do {
29554 next_epoch = NULL;
29555
29556- epoch_size = atomic_read(&epoch->epoch_size);
29557+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29558
29559 switch (ev & ~EV_CLEANUP) {
29560 case EV_PUT:
fe2de317 29561@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
8308f9c9
MT
29562 rv = FE_DESTROYED;
29563 } else {
29564 epoch->flags = 0;
29565- atomic_set(&epoch->epoch_size, 0);
29566+ atomic_set_unchecked(&epoch->epoch_size, 0);
29567 /* atomic_set(&epoch->active, 0); is already zero */
29568 if (rv == FE_STILL_LIVE)
29569 rv = FE_RECYCLED;
fe2de317 29570@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
8308f9c9
MT
29571 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29572 drbd_flush(mdev);
29573
29574- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29575+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29576 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29577 if (epoch)
29578 break;
29579 }
29580
29581 epoch = mdev->current_epoch;
29582- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29583+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29584
29585 D_ASSERT(atomic_read(&epoch->active) == 0);
29586 D_ASSERT(epoch->flags == 0);
fe2de317 29587@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
8308f9c9
MT
29588 }
29589
29590 epoch->flags = 0;
29591- atomic_set(&epoch->epoch_size, 0);
29592+ atomic_set_unchecked(&epoch->epoch_size, 0);
29593 atomic_set(&epoch->active, 0);
29594
29595 spin_lock(&mdev->epoch_lock);
29596- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29597+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29598 list_add(&epoch->list, &mdev->current_epoch->list);
29599 mdev->current_epoch = epoch;
29600 mdev->epochs++;
fe2de317 29601@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
8308f9c9
MT
29602 spin_unlock(&mdev->peer_seq_lock);
29603
29604 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29605- atomic_inc(&mdev->current_epoch->epoch_size);
29606+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29607 return drbd_drain_block(mdev, data_size);
29608 }
29609
fe2de317 29610@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
8308f9c9
MT
29611
29612 spin_lock(&mdev->epoch_lock);
29613 e->epoch = mdev->current_epoch;
29614- atomic_inc(&e->epoch->epoch_size);
29615+ atomic_inc_unchecked(&e->epoch->epoch_size);
29616 atomic_inc(&e->epoch->active);
29617 spin_unlock(&mdev->epoch_lock);
29618
fe2de317 29619@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
8308f9c9
MT
29620 D_ASSERT(list_empty(&mdev->done_ee));
29621
29622 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29623- atomic_set(&mdev->current_epoch->epoch_size, 0);
29624+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29625 D_ASSERT(list_empty(&mdev->current_epoch->list));
29626 }
29627
fe2de317 29628diff --git a/drivers/block/loop.c b/drivers/block/loop.c
5e856224 29629index cd50435..ba1ffb5 100644
fe2de317
MT
29630--- a/drivers/block/loop.c
29631+++ b/drivers/block/loop.c
5e856224 29632@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
6e9df6a3
MT
29633 mm_segment_t old_fs = get_fs();
29634
29635 set_fs(get_ds());
29636- bw = file->f_op->write(file, buf, len, &pos);
29637+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29638 set_fs(old_fs);
29639 if (likely(bw == len))
29640 return 0;
fe2de317 29641diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
4c928ab7 29642index 4364303..9adf4ee 100644
fe2de317
MT
29643--- a/drivers/char/Kconfig
29644+++ b/drivers/char/Kconfig
29645@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29646
29647 config DEVKMEM
29648 bool "/dev/kmem virtual device support"
29649- default y
29650+ default n
29651+ depends on !GRKERNSEC_KMEM
29652 help
29653 Say Y here if you want to support the /dev/kmem device. The
29654 /dev/kmem device is rarely used, but can be used for certain
29655@@ -596,6 +597,7 @@ config DEVPORT
29656 bool
29657 depends on !M68K
29658 depends on ISA || PCI
29659+ depends on !GRKERNSEC_KMEM
29660 default y
29661
29662 source "drivers/s390/char/Kconfig"
29663diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29664index 2e04433..22afc64 100644
29665--- a/drivers/char/agp/frontend.c
29666+++ b/drivers/char/agp/frontend.c
29667@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
58c5fc13
MT
29668 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29669 return -EFAULT;
29670
29671- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29672+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29673 return -EFAULT;
29674
29675 client = agp_find_client_by_pid(reserve.pid);
fe2de317
MT
29676diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
29677index 095ab90..afad0a4 100644
29678--- a/drivers/char/briq_panel.c
29679+++ b/drivers/char/briq_panel.c
71d190be
MT
29680@@ -9,6 +9,7 @@
29681 #include <linux/types.h>
29682 #include <linux/errno.h>
29683 #include <linux/tty.h>
29684+#include <linux/mutex.h>
29685 #include <linux/timer.h>
29686 #include <linux/kernel.h>
29687 #include <linux/wait.h>
29688@@ -34,6 +35,7 @@ static int vfd_is_open;
29689 static unsigned char vfd[40];
29690 static int vfd_cursor;
29691 static unsigned char ledpb, led;
29692+static DEFINE_MUTEX(vfd_mutex);
29693
29694 static void update_vfd(void)
29695 {
fe2de317 29696@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
71d190be
MT
29697 if (!vfd_is_open)
29698 return -EBUSY;
29699
29700+ mutex_lock(&vfd_mutex);
29701 for (;;) {
29702 char c;
29703 if (!indx)
29704 break;
29705- if (get_user(c, buf))
29706+ if (get_user(c, buf)) {
29707+ mutex_unlock(&vfd_mutex);
29708 return -EFAULT;
29709+ }
29710 if (esc) {
29711 set_led(c);
29712 esc = 0;
fe2de317 29713@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
71d190be
MT
29714 buf++;
29715 }
29716 update_vfd();
29717+ mutex_unlock(&vfd_mutex);
29718
29719 return len;
29720 }
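briq_panel_write() updates a shared display buffer and cursor (vfd, vfd_cursor) from process context with no locking, so two concurrent writers could interleave and corrupt the panel state. The hunk adds vfd_mutex around the whole write loop and is careful to drop the lock on the get_user() fault path before returning -EFAULT. A condensed sketch of the same locking pattern (hypothetical function and buffer names):

    static DEFINE_MUTEX(panel_mutex);
    static char panel_buf[40];
    static int panel_cursor;

    static ssize_t panel_write(const char __user *buf, size_t len)
    {
            size_t i;

            mutex_lock(&panel_mutex);
            for (i = 0; i < len; i++) {
                    char c;

                    if (get_user(c, buf + i)) {
                            mutex_unlock(&panel_mutex); /* don't leak the lock on a fault */
                            return -EFAULT;
                    }
                    panel_buf[panel_cursor] = c;
                    panel_cursor = (panel_cursor + 1) % (int)sizeof(panel_buf);
            }
            mutex_unlock(&panel_mutex);
            return len;
    }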
fe2de317
MT
29721diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29722index f773a9d..65cd683 100644
29723--- a/drivers/char/genrtc.c
29724+++ b/drivers/char/genrtc.c
29725@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
71d190be
MT
29726 switch (cmd) {
29727
29728 case RTC_PLL_GET:
29729+ memset(&pll, 0, sizeof(pll));
29730 if (get_rtc_pll(&pll))
29731 return -EINVAL;
29732 else
fe2de317
MT
29733diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29734index 0833896..cccce52 100644
29735--- a/drivers/char/hpet.c
29736+++ b/drivers/char/hpet.c
29737@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
ae4e228f
MT
29738 }
29739
df50ba0c 29740 static int
bc901d79
MT
29741-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29742+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29743 struct hpet_info *info)
ae4e228f 29744 {
df50ba0c 29745 struct hpet_timer __iomem *timer;
fe2de317 29746diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
4c928ab7 29747index 58c0e63..46c16bf 100644
fe2de317
MT
29748--- a/drivers/char/ipmi/ipmi_msghandler.c
29749+++ b/drivers/char/ipmi/ipmi_msghandler.c
15a11c5b 29750@@ -415,7 +415,7 @@ struct ipmi_smi {
58c5fc13
MT
29751 struct proc_dir_entry *proc_dir;
29752 char proc_dir_name[10];
29753
29754- atomic_t stats[IPMI_NUM_STATS];
29755+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29756
29757 /*
29758 * run_to_completion duplicate of smb_info, smi_info
15a11c5b 29759@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
58c5fc13
MT
29760
29761
29762 #define ipmi_inc_stat(intf, stat) \
29763- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29764+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29765 #define ipmi_get_stat(intf, stat) \
29766- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29767+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29768
29769 static int is_lan_addr(struct ipmi_addr *addr)
29770 {
fe2de317 29771@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
58c5fc13
MT
29772 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29773 init_waitqueue_head(&intf->waitq);
29774 for (i = 0; i < IPMI_NUM_STATS; i++)
29775- atomic_set(&intf->stats[i], 0);
29776+ atomic_set_unchecked(&intf->stats[i], 0);
29777
29778 intf->proc_dir = NULL;
29779
fe2de317 29780diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
5e856224 29781index 50fcf9c..91b5528 100644
fe2de317
MT
29782--- a/drivers/char/ipmi/ipmi_si_intf.c
29783+++ b/drivers/char/ipmi/ipmi_si_intf.c
15a11c5b 29784@@ -277,7 +277,7 @@ struct smi_info {
58c5fc13
MT
29785 unsigned char slave_addr;
29786
29787 /* Counters and things for the proc filesystem. */
29788- atomic_t stats[SI_NUM_STATS];
29789+ atomic_unchecked_t stats[SI_NUM_STATS];
29790
29791 struct task_struct *thread;
29792
15a11c5b 29793@@ -286,9 +286,9 @@ struct smi_info {
58c5fc13
MT
29794 };
29795
29796 #define smi_inc_stat(smi, stat) \
29797- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29798+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29799 #define smi_get_stat(smi, stat) \
29800- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29801+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29802
29803 #define SI_MAX_PARMS 4
29804
fe2de317 29805@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
58c5fc13
MT
29806 atomic_set(&new_smi->req_events, 0);
29807 new_smi->run_to_completion = 0;
29808 for (i = 0; i < SI_NUM_STATS; i++)
29809- atomic_set(&new_smi->stats[i], 0);
29810+ atomic_set_unchecked(&new_smi->stats[i], 0);
29811
57199397 29812 new_smi->interrupt_disabled = 1;
58c5fc13 29813 atomic_set(&new_smi->stop_operation, 0);
fe2de317
MT
29814diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29815index 1aeaaba..e018570 100644
29816--- a/drivers/char/mbcs.c
29817+++ b/drivers/char/mbcs.c
29818@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
6e9df6a3
MT
29819 return 0;
29820 }
29821
29822-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29823+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29824 {
29825 .part_num = MBCS_PART_NUM,
29826 .mfg_num = MBCS_MFG_NUM,
fe2de317 29827diff --git a/drivers/char/mem.c b/drivers/char/mem.c
5e856224 29828index d6e9d08..4493e89 100644
fe2de317
MT
29829--- a/drivers/char/mem.c
29830+++ b/drivers/char/mem.c
58c5fc13
MT
29831@@ -18,6 +18,7 @@
29832 #include <linux/raw.h>
29833 #include <linux/tty.h>
29834 #include <linux/capability.h>
29835+#include <linux/security.h>
29836 #include <linux/ptrace.h>
29837 #include <linux/device.h>
29838 #include <linux/highmem.h>
4c928ab7 29839@@ -35,6 +36,10 @@
58c5fc13
MT
29840 # include <linux/efi.h>
29841 #endif
29842
29843+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
6e9df6a3 29844+extern const struct file_operations grsec_fops;
58c5fc13
MT
29845+#endif
29846+
ae4e228f
MT
29847 static inline unsigned long size_inside_page(unsigned long start,
29848 unsigned long size)
29849 {
4c928ab7 29850@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
71d190be
MT
29851
29852 while (cursor < to) {
29853 if (!devmem_is_allowed(pfn)) {
29854+#ifdef CONFIG_GRKERNSEC_KMEM
29855+ gr_handle_mem_readwrite(from, to);
29856+#else
29857 printk(KERN_INFO
29858 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29859 current->comm, from, to);
29860+#endif
29861 return 0;
29862 }
29863 cursor += PAGE_SIZE;
4c928ab7 29864@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
71d190be
MT
29865 }
29866 return 1;
29867 }
29868+#elif defined(CONFIG_GRKERNSEC_KMEM)
29869+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29870+{
29871+ return 0;
29872+}
29873 #else
29874 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29875 {
4c928ab7 29876@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
57199397
MT
29877
29878 while (count > 0) {
29879 unsigned long remaining;
29880+ char *temp;
29881
29882 sz = size_inside_page(p, count);
29883
4c928ab7 29884@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
57199397
MT
29885 if (!ptr)
29886 return -EFAULT;
29887
29888- remaining = copy_to_user(buf, ptr, sz);
29889+#ifdef CONFIG_PAX_USERCOPY
29890+ temp = kmalloc(sz, GFP_KERNEL);
29891+ if (!temp) {
29892+ unxlate_dev_mem_ptr(p, ptr);
29893+ return -ENOMEM;
29894+ }
29895+ memcpy(temp, ptr, sz);
29896+#else
29897+ temp = ptr;
29898+#endif
29899+
29900+ remaining = copy_to_user(buf, temp, sz);
29901+
29902+#ifdef CONFIG_PAX_USERCOPY
29903+ kfree(temp);
29904+#endif
29905+
29906 unxlate_dev_mem_ptr(p, ptr);
29907 if (remaining)
29908 return -EFAULT;
4c928ab7 29909@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
29910 size_t count, loff_t *ppos)
29911 {
29912 unsigned long p = *ppos;
29913- ssize_t low_count, read, sz;
29914+ ssize_t low_count, read, sz, err = 0;
29915 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29916- int err = 0;
29917
29918 read = 0;
29919 if (p < (unsigned long) high_memory) {
4c928ab7 29920@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
29921 }
29922 #endif
29923 while (low_count > 0) {
29924+ char *temp;
29925+
29926 sz = size_inside_page(p, low_count);
29927
29928 /*
4c928ab7 29929@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
29930 */
29931 kbuf = xlate_dev_kmem_ptr((char *)p);
29932
29933- if (copy_to_user(buf, kbuf, sz))
29934+#ifdef CONFIG_PAX_USERCOPY
29935+ temp = kmalloc(sz, GFP_KERNEL);
29936+ if (!temp)
29937+ return -ENOMEM;
29938+ memcpy(temp, kbuf, sz);
29939+#else
29940+ temp = kbuf;
29941+#endif
29942+
29943+ err = copy_to_user(buf, temp, sz);
29944+
29945+#ifdef CONFIG_PAX_USERCOPY
29946+ kfree(temp);
29947+#endif
29948+
29949+ if (err)
29950 return -EFAULT;
29951 buf += sz;
29952 p += sz;
4c928ab7 29953@@ -867,6 +914,9 @@ static const struct memdev {
58c5fc13 29954 #ifdef CONFIG_CRASH_DUMP
ae4e228f 29955 [12] = { "oldmem", 0, &oldmem_fops, NULL },
58c5fc13
MT
29956 #endif
29957+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
ae4e228f 29958+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
58c5fc13
MT
29959+#endif
29960 };
29961
29962 static int memory_open(struct inode *inode, struct file *filp)
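
The read_mem()/read_kmem() hunks above stage the data through a freshly allocated slab buffer whenever CONFIG_PAX_USERCOPY is set, so the usercopy checks operate on a heap object of known size rather than on an arbitrary /dev/mem or vmalloc mapping. A minimal sketch of that bounce-copy pattern follows; it is a hypothetical helper written for illustration, not code lifted from the patch.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical helper mirroring the PAX_USERCOPY bounce buffer used in
 * read_mem()/read_kmem() above: copy the source into slab memory of
 * exactly sz bytes, hand that to copy_to_user(), then free it. */
static ssize_t bounce_to_user(char __user *buf, const void *src, size_t sz)
{
	char *temp;
	unsigned long not_copied;

	temp = kmalloc(sz, GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	memcpy(temp, src, sz);
	not_copied = copy_to_user(buf, temp, sz);
	kfree(temp);

	return not_copied ? -EFAULT : (ssize_t)sz;
}

The real hunks keep the #ifdef, so the extra allocation and copy only happen on PAX_USERCOPY kernels; without it the original pointer is passed straight to copy_to_user() as before.
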
fe2de317
MT
29963diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29964index da3cfee..a5a6606 100644
29965--- a/drivers/char/nvram.c
29966+++ b/drivers/char/nvram.c
29967@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
ae4e228f
MT
29968
29969 spin_unlock_irq(&rtc_lock);
58c5fc13 29970
ae4e228f
MT
29971- if (copy_to_user(buf, contents, tmp - contents))
29972+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29973 return -EFAULT;
29974
29975 *ppos = i;
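
The one-line nvram change above refuses the read outright if the computed length ever exceeds the on-stack contents[] buffer, instead of trusting the offset arithmetic that produced it. The patch simply open-codes the comparison; the guard it expresses looks roughly like this hypothetical helper.

#include <linux/uaccess.h>

/* Illustrative only: len is derived at run time and treated as
 * untrusted, buf_len is the true size of the kernel buffer. */
static int copy_out_bounded(void __user *dst, const void *src,
			    size_t len, size_t buf_len)
{
	if (len > buf_len)	/* would read past the kernel buffer */
		return -EFAULT;
	return copy_to_user(dst, src, len) ? -EFAULT : 0;
}
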
fe2de317 29976diff --git a/drivers/char/random.c b/drivers/char/random.c
5e856224 29977index 54ca8b2..4a092ed 100644
fe2de317
MT
29978--- a/drivers/char/random.c
29979+++ b/drivers/char/random.c
66a7e928 29980@@ -261,8 +261,13 @@
58c5fc13
MT
29981 /*
29982 * Configuration information
29983 */
29984+#ifdef CONFIG_GRKERNSEC_RANDNET
29985+#define INPUT_POOL_WORDS 512
29986+#define OUTPUT_POOL_WORDS 128
29987+#else
29988 #define INPUT_POOL_WORDS 128
29989 #define OUTPUT_POOL_WORDS 32
29990+#endif
29991 #define SEC_XFER_SIZE 512
57199397 29992 #define EXTRACT_SIZE 10
58c5fc13 29993
66a7e928 29994@@ -300,10 +305,17 @@ static struct poolinfo {
58c5fc13
MT
29995 int poolwords;
29996 int tap1, tap2, tap3, tap4, tap5;
29997 } poolinfo_table[] = {
29998+#ifdef CONFIG_GRKERNSEC_RANDNET
29999+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
30000+ { 512, 411, 308, 208, 104, 1 },
30001+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
30002+ { 128, 103, 76, 51, 25, 1 },
30003+#else
30004 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
30005 { 128, 103, 76, 51, 25, 1 },
30006 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
30007 { 32, 26, 20, 14, 7, 1 },
30008+#endif
30009 #if 0
30010 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
30011 { 2048, 1638, 1231, 819, 411, 1 },
5e856224 30012@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ae4e228f
MT
30013
30014 extract_buf(r, tmp);
30015 i = min_t(int, nbytes, EXTRACT_SIZE);
30016- if (copy_to_user(buf, tmp, i)) {
30017+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
30018 ret = -EFAULT;
30019 break;
30020 }
5e856224 30021@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
58c5fc13
MT
30022 #include <linux/sysctl.h>
30023
30024 static int min_read_thresh = 8, min_write_thresh;
30025-static int max_read_thresh = INPUT_POOL_WORDS * 32;
30026+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
30027 static int max_write_thresh = INPUT_POOL_WORDS * 32;
30028 static char sysctl_bootid[16];
30029
5e856224
MT
30030@@ -1260,10 +1272,15 @@ static int proc_do_uuid(ctl_table *table, int write,
30031 uuid = table->data;
30032 if (!uuid) {
30033 uuid = tmp_uuid;
30034- uuid[8] = 0;
30035- }
30036- if (uuid[8] == 0)
30037 generate_random_uuid(uuid);
30038+ } else {
30039+ static DEFINE_SPINLOCK(bootid_spinlock);
30040+
30041+ spin_lock(&bootid_spinlock);
30042+ if (!uuid[8])
30043+ generate_random_uuid(uuid);
30044+ spin_unlock(&bootid_spinlock);
30045+ }
30046
30047 sprintf(buf, "%pU", uuid);
30048
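
Besides the larger GRKERNSEC_RANDNET pools and the tightened read-threshold sysctl limit, the proc_do_uuid() hunk above serialises the lazy generation of the boot UUID: two concurrent first readers can no longer both observe uuid[8] == 0 and race to overwrite each other's value. A stripped-down sketch of that change, with illustrative names only:

#include <linux/spinlock.h>
#include <linux/random.h>

/* Illustrative: generate the persistent boot id exactly once, no matter
 * how many readers hit the sysctl at the same time. */
static void boot_uuid_get(unsigned char uuid[16])
{
	static DEFINE_SPINLOCK(bootid_lock);

	spin_lock(&bootid_lock);
	if (!uuid[8])			/* byte 8 is non-zero once generated */
		generate_random_uuid(uuid);
	spin_unlock(&bootid_lock);
}
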
fe2de317
MT
30049diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
30050index 1ee8ce7..b778bef 100644
30051--- a/drivers/char/sonypi.c
30052+++ b/drivers/char/sonypi.c
c52201e0
MT
30053@@ -55,6 +55,7 @@
30054 #include <asm/uaccess.h>
30055 #include <asm/io.h>
30056 #include <asm/system.h>
30057+#include <asm/local.h>
30058
30059 #include <linux/sonypi.h>
30060
30061@@ -491,7 +492,7 @@ static struct sonypi_device {
58c5fc13
MT
30062 spinlock_t fifo_lock;
30063 wait_queue_head_t fifo_proc_list;
30064 struct fasync_struct *fifo_async;
30065- int open_count;
c52201e0 30066+ local_t open_count;
58c5fc13
MT
30067 int model;
30068 struct input_dev *input_jog_dev;
30069 struct input_dev *input_key_dev;
fe2de317 30070@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
58c5fc13
MT
30071 static int sonypi_misc_release(struct inode *inode, struct file *file)
30072 {
30073 mutex_lock(&sonypi_device.lock);
30074- sonypi_device.open_count--;
c52201e0 30075+ local_dec(&sonypi_device.open_count);
58c5fc13
MT
30076 mutex_unlock(&sonypi_device.lock);
30077 return 0;
30078 }
fe2de317 30079@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
ae4e228f 30080 {
58c5fc13
MT
30081 mutex_lock(&sonypi_device.lock);
30082 /* Flush input queue on first open */
30083- if (!sonypi_device.open_count)
c52201e0 30084+ if (!local_read(&sonypi_device.open_count))
ae4e228f 30085 kfifo_reset(&sonypi_device.fifo);
58c5fc13 30086- sonypi_device.open_count++;
c52201e0 30087+ local_inc(&sonypi_device.open_count);
58c5fc13 30088 mutex_unlock(&sonypi_device.lock);
ae4e228f 30089
58c5fc13 30090 return 0;
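
The sonypi hunks above (and the drm_fops ones later in this patch) convert a plain int open count to a local_t, so the counter is driven through dedicated primitives instead of an open-coded read-modify-write. A small usage sketch of that API, not taken from the driver:

#include <asm/local.h>
#include <linux/printk.h>

static local_t example_open_count = LOCAL_INIT(0);

static void example_open(void)
{
	if (!local_read(&example_open_count))	/* first opener */
		pr_info("flushing queue on first open\n");
	local_inc(&example_open_count);
}

static void example_release(void)
{
	local_dec(&example_open_count);
}

In the driver itself the read/increment pair still runs under sonypi_device.lock, so the check and the update remain consistent with each other.
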
fe2de317 30091diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
5e856224 30092index ad7c732..5aa8054 100644
fe2de317
MT
30093--- a/drivers/char/tpm/tpm.c
30094+++ b/drivers/char/tpm/tpm.c
5e856224 30095@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
fe2de317
MT
30096 chip->vendor.req_complete_val)
30097 goto out_recv;
30098
30099- if ((status == chip->vendor.req_canceled)) {
30100+ if (status == chip->vendor.req_canceled) {
30101 dev_err(chip->dev, "Operation Canceled\n");
30102 rc = -ECANCELED;
30103 goto out;
fe2de317
MT
30104diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
30105index 0636520..169c1d0 100644
30106--- a/drivers/char/tpm/tpm_bios.c
30107+++ b/drivers/char/tpm/tpm_bios.c
30108@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
ae4e228f
MT
30109 event = addr;
30110
30111 if ((event->event_type == 0 && event->event_size == 0) ||
30112- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
30113+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
30114 return NULL;
30115
30116 return addr;
fe2de317 30117@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
ae4e228f
MT
30118 return NULL;
30119
30120 if ((event->event_type == 0 && event->event_size == 0) ||
30121- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
30122+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
30123 return NULL;
30124
30125 (*pos)++;
fe2de317 30126@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
ae4e228f
MT
30127 int i;
30128
30129 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
30130- seq_putc(m, data[i]);
30131+ if (!seq_putc(m, data[i]))
30132+ return -EFAULT;
30133
58c5fc13
MT
30134 return 0;
30135 }
fe2de317 30136@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
ae4e228f 30137 log->bios_event_log_end = log->bios_event_log + len;
58c5fc13 30138
ae4e228f
MT
30139 virt = acpi_os_map_memory(start, len);
30140+ if (!virt) {
30141+ kfree(log->bios_event_log);
30142+ log->bios_event_log = NULL;
30143+ return -EFAULT;
30144+ }
30145
6e9df6a3
MT
30146- memcpy(log->bios_event_log, virt, len);
30147+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
ae4e228f 30148
6e9df6a3
MT
30149 acpi_os_unmap_memory(virt, len);
30150 return 0;
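
The two tpm_bios.c checks above are rewritten so that the attacker-influenced event_size is compared against the space remaining below limit, rather than being added to the cursor first, where a huge value could wrap the pointer arithmetic past the check. The same file change also makes read_log() bail out and free its buffer when acpi_os_map_memory() fails instead of copying from a NULL mapping. A schematic version of the overflow-safe comparison (names invented for illustration):

#include <linux/types.h>

/* Schematic: return true only if a header plus len payload bytes fit
 * between cursor and limit, without ever forming cursor + len. */
static bool event_fits(const void *cursor, const void *limit,
		       size_t hdr_size, u32 len)
{
	size_t room = (const char *)limit - (const char *)cursor;

	if (room < hdr_size)
		return false;
	return len < room - hdr_size;
}
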
fe2de317 30151diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
5e856224 30152index b58b561..c9088c8 100644
fe2de317
MT
30153--- a/drivers/char/virtio_console.c
30154+++ b/drivers/char/virtio_console.c
4c928ab7 30155@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
6e9df6a3
MT
30156 if (to_user) {
30157 ssize_t ret;
30158
30159- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
30160+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
30161 if (ret)
30162 return -EFAULT;
30163 } else {
4c928ab7 30164@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
6e9df6a3
MT
30165 if (!port_has_data(port) && !port->host_connected)
30166 return 0;
30167
30168- return fill_readbuf(port, ubuf, count, true);
30169+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
30170 }
30171
30172 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
fe2de317 30173diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
4c928ab7 30174index c9eee6d..f9d5280 100644
fe2de317
MT
30175--- a/drivers/edac/amd64_edac.c
30176+++ b/drivers/edac/amd64_edac.c
4c928ab7 30177@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
6e9df6a3
MT
30178 * PCI core identifies what devices are on a system during boot, and then
30179 * inquiry this table to see if this driver is for a given device found.
30180 */
30181-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
30182+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
30183 {
30184 .vendor = PCI_VENDOR_ID_AMD,
30185 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
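
This is the first of a long run of EDAC (and other) drivers in this patch whose const device-id tables move from __devinitdata to __devinitconst: a const-qualified object belongs in the read-only .devinit.rodata section rather than the writable .devinit.data one, which keeps the annotation consistent with the constification done elsewhere in the patch. A hypothetical table showing the annotation, not one of the real driver tables:

#include <linux/pci.h>
#include <linux/init.h>

/* Hypothetical id table: const data paired with __devinitconst so it
 * lands in .devinit.rodata instead of the writable .devinit.data. */
static const struct pci_device_id example_pci_tbl[] __devinitconst = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* made-up device id */
	{ 0, }						/* terminating entry */
};
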
fe2de317
MT
30186diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
30187index e47e73b..348e0bd 100644
30188--- a/drivers/edac/amd76x_edac.c
30189+++ b/drivers/edac/amd76x_edac.c
30190@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30191 edac_mc_free(mci);
30192 }
30193
30194-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
30195+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
30196 {
30197 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30198 AMD762},
fe2de317
MT
30199diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
30200index 1af531a..3a8ff27 100644
30201--- a/drivers/edac/e752x_edac.c
30202+++ b/drivers/edac/e752x_edac.c
30203@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30204 edac_mc_free(mci);
30205 }
30206
30207-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
30208+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
30209 {
30210 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30211 E7520},
fe2de317
MT
30212diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
30213index 6ffb6d2..383d8d7 100644
30214--- a/drivers/edac/e7xxx_edac.c
30215+++ b/drivers/edac/e7xxx_edac.c
30216@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30217 edac_mc_free(mci);
30218 }
30219
30220-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
30221+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
30222 {
30223 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30224 E7205},
fe2de317 30225diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
5e856224 30226index 97f5064..202b6e6 100644
fe2de317
MT
30227--- a/drivers/edac/edac_pci_sysfs.c
30228+++ b/drivers/edac/edac_pci_sysfs.c
30229@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
8308f9c9
MT
30230 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
30231 static int edac_pci_poll_msec = 1000; /* one second workq period */
30232
30233-static atomic_t pci_parity_count = ATOMIC_INIT(0);
30234-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30235+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
30236+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
30237
30238 static struct kobject *edac_pci_top_main_kobj;
30239 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
fe2de317 30240@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30241 edac_printk(KERN_CRIT, EDAC_PCI,
30242 "Signaled System Error on %s\n",
30243 pci_name(dev));
30244- atomic_inc(&pci_nonparity_count);
30245+ atomic_inc_unchecked(&pci_nonparity_count);
30246 }
30247
30248 if (status & (PCI_STATUS_PARITY)) {
fe2de317 30249@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30250 "Master Data Parity Error on %s\n",
30251 pci_name(dev));
30252
30253- atomic_inc(&pci_parity_count);
30254+ atomic_inc_unchecked(&pci_parity_count);
30255 }
30256
30257 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 30258@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30259 "Detected Parity Error on %s\n",
30260 pci_name(dev));
30261
30262- atomic_inc(&pci_parity_count);
30263+ atomic_inc_unchecked(&pci_parity_count);
30264 }
30265 }
30266
fe2de317 30267@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30268 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
30269 "Signaled System Error on %s\n",
30270 pci_name(dev));
30271- atomic_inc(&pci_nonparity_count);
30272+ atomic_inc_unchecked(&pci_nonparity_count);
30273 }
30274
30275 if (status & (PCI_STATUS_PARITY)) {
fe2de317 30276@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30277 "Master Data Parity Error on "
30278 "%s\n", pci_name(dev));
30279
30280- atomic_inc(&pci_parity_count);
30281+ atomic_inc_unchecked(&pci_parity_count);
30282 }
30283
30284 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 30285@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
30286 "Detected Parity Error on %s\n",
30287 pci_name(dev));
30288
30289- atomic_inc(&pci_parity_count);
30290+ atomic_inc_unchecked(&pci_parity_count);
30291 }
30292 }
30293 }
30294@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
30295 if (!check_pci_errors)
30296 return;
30297
30298- before_count = atomic_read(&pci_parity_count);
30299+ before_count = atomic_read_unchecked(&pci_parity_count);
30300
30301 /* scan all PCI devices looking for a Parity Error on devices and
30302 * bridges.
30303@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
30304 /* Only if operator has selected panic on PCI Error */
30305 if (edac_pci_get_panic_on_pe()) {
30306 /* If the count is different 'after' from 'before' */
30307- if (before_count != atomic_read(&pci_parity_count))
30308+ if (before_count != atomic_read_unchecked(&pci_parity_count))
30309 panic("EDAC: PCI Parity Error");
30310 }
30311 }
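
The EDAC PCI parity/non-parity counters above are pure statistics, so the patch moves them to atomic_unchecked_t: under grsecurity's PAX_REFCOUNT the normal atomic_inc() is instrumented to catch overflows, and counters that are allowed to wrap opt out via the *_unchecked variants. A usage sketch of the split (the type and helpers are supplied by the grsecurity/PaX portions of this patch, the names below are made up):

#include <linux/atomic.h>

static atomic_unchecked_t example_error_count = ATOMIC_INIT(0);
static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_log_error(void)
{
	atomic_inc_unchecked(&example_error_count);	/* may wrap, by design */
}

static int example_snapshot(void)
{
	atomic_inc(&example_refcount);			/* real refcount, stays overflow-checked */
	return atomic_read_unchecked(&example_error_count);
}
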
fe2de317
MT
30312diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
30313index c0510b3..6e2a954 100644
30314--- a/drivers/edac/i3000_edac.c
30315+++ b/drivers/edac/i3000_edac.c
30316@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30317 edac_mc_free(mci);
30318 }
30319
30320-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
30321+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
30322 {
30323 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30324 I3000},
fe2de317 30325diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
5e856224 30326index 73f55e200..5faaf59 100644
fe2de317
MT
30327--- a/drivers/edac/i3200_edac.c
30328+++ b/drivers/edac/i3200_edac.c
5e856224 30329@@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30330 edac_mc_free(mci);
30331 }
30332
30333-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
30334+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
30335 {
30336 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30337 I3200},
fe2de317
MT
30338diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
30339index 4dc3ac2..67d05a6 100644
30340--- a/drivers/edac/i5000_edac.c
30341+++ b/drivers/edac/i5000_edac.c
30342@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30343 *
30344 * The "E500P" device is the first device supported.
30345 */
30346-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
30347+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
30348 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
30349 .driver_data = I5000P},
30350
fe2de317
MT
30351diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
30352index bcbdeec..9886d16 100644
30353--- a/drivers/edac/i5100_edac.c
30354+++ b/drivers/edac/i5100_edac.c
30355@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30356 edac_mc_free(mci);
30357 }
30358
30359-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
30360+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
30361 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
30362 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
30363 { 0, }
fe2de317
MT
30364diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
30365index 74d6ec34..baff517 100644
30366--- a/drivers/edac/i5400_edac.c
30367+++ b/drivers/edac/i5400_edac.c
30368@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30369 *
30370 * The "E500P" device is the first device supported.
30371 */
30372-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
30373+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
30374 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
30375 {0,} /* 0 terminated list. */
30376 };
fe2de317 30377diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
4c928ab7 30378index 6104dba..e7ea8e1 100644
fe2de317
MT
30379--- a/drivers/edac/i7300_edac.c
30380+++ b/drivers/edac/i7300_edac.c
4c928ab7 30381@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30382 *
30383 * Has only 8086:360c PCI ID
30384 */
30385-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
30386+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
30387 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
30388 {0,} /* 0 terminated list. */
30389 };
fe2de317 30390diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
5e856224 30391index 8568d9b..42b2fa8 100644
fe2de317
MT
30392--- a/drivers/edac/i7core_edac.c
30393+++ b/drivers/edac/i7core_edac.c
4c928ab7 30394@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
6e9df6a3
MT
30395 /*
30396 * pci_device_id table for which devices we are looking for
30397 */
30398-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
30399+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
30400 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
30401 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
30402 {0,} /* 0 terminated list. */
fe2de317
MT
30403diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
30404index 4329d39..f3022ef 100644
30405--- a/drivers/edac/i82443bxgx_edac.c
30406+++ b/drivers/edac/i82443bxgx_edac.c
30407@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30408
30409 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
30410
30411-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
30412+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
30413 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
30414 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
30415 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
fe2de317
MT
30416diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
30417index 931a057..fd28340 100644
30418--- a/drivers/edac/i82860_edac.c
30419+++ b/drivers/edac/i82860_edac.c
30420@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30421 edac_mc_free(mci);
30422 }
30423
30424-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
30425+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
30426 {
30427 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30428 I82860},
fe2de317
MT
30429diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
30430index 33864c6..01edc61 100644
30431--- a/drivers/edac/i82875p_edac.c
30432+++ b/drivers/edac/i82875p_edac.c
30433@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30434 edac_mc_free(mci);
30435 }
30436
30437-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
30438+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
30439 {
30440 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30441 I82875P},
fe2de317 30442diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
5e856224 30443index 4184e01..dcb2cd3 100644
fe2de317
MT
30444--- a/drivers/edac/i82975x_edac.c
30445+++ b/drivers/edac/i82975x_edac.c
5e856224 30446@@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30447 edac_mc_free(mci);
30448 }
30449
30450-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
30451+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
30452 {
30453 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30454 I82975X
fe2de317 30455diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
4c928ab7 30456index 0106747..0b40417 100644
fe2de317
MT
30457--- a/drivers/edac/mce_amd.h
30458+++ b/drivers/edac/mce_amd.h
15a11c5b
MT
30459@@ -83,7 +83,7 @@ struct amd_decoder_ops {
30460 bool (*dc_mce)(u16, u8);
30461 bool (*ic_mce)(u16, u8);
30462 bool (*nb_mce)(u16, u8);
30463-};
30464+} __no_const;
30465
30466 void amd_report_gart_errors(bool);
4c928ab7 30467 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
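
mce_amd.h is one of several headers in this patch where a structure made up of function pointers gains __no_const: the PaX constify plugin otherwise treats such ops structures as read-only after init, and this one has to be filled in at run time depending on the CPU family. A schematic example of the annotation (struct and fields are invented; __no_const itself is defined by this patch in compiler.h):

#include <linux/types.h>

/* Schematic: opt a function-pointer-only struct out of automatic
 * constification because it must stay writable at run time. */
struct example_decoder_ops {
	bool (*dc_mce)(u16 ec, u8 xec);
	bool (*ic_mce)(u16 ec, u8 xec);
} __no_const;

static struct example_decoder_ops example_ops;	/* assigned per CPU family at boot */
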
fe2de317 30468diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
5e856224 30469index e294e1b..a41b05b 100644
fe2de317
MT
30470--- a/drivers/edac/r82600_edac.c
30471+++ b/drivers/edac/r82600_edac.c
30472@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30473 edac_mc_free(mci);
30474 }
30475
30476-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
30477+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
30478 {
30479 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
30480 },
4c928ab7 30481diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
5e856224 30482index 1dc118d..8c68af9 100644
4c928ab7
MT
30483--- a/drivers/edac/sb_edac.c
30484+++ b/drivers/edac/sb_edac.c
30485@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
30486 /*
30487 * pci_device_id table for which devices we are looking for
30488 */
30489-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
30490+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
30491 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
30492 {0,} /* 0 terminated list. */
30493 };
fe2de317
MT
30494diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
30495index b6f47de..c5acf3a 100644
30496--- a/drivers/edac/x38_edac.c
30497+++ b/drivers/edac/x38_edac.c
30498@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
30499 edac_mc_free(mci);
30500 }
30501
30502-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
30503+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
30504 {
30505 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30506 X38},
fe2de317
MT
30507diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30508index 85661b0..c784559a 100644
30509--- a/drivers/firewire/core-card.c
30510+++ b/drivers/firewire/core-card.c
15a11c5b
MT
30511@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
30512
30513 void fw_core_remove_card(struct fw_card *card)
30514 {
30515- struct fw_card_driver dummy_driver = dummy_driver_template;
30516+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
30517
30518 card->driver->update_phy_reg(card, 4,
30519 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fe2de317
MT
30520diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30521index 4799393..37bd3ab 100644
30522--- a/drivers/firewire/core-cdev.c
30523+++ b/drivers/firewire/core-cdev.c
30524@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
df50ba0c 30525 int ret;
ae4e228f 30526
df50ba0c
MT
30527 if ((request->channels == 0 && request->bandwidth == 0) ||
30528- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30529- request->bandwidth < 0)
30530+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30531 return -EINVAL;
ae4e228f 30532
df50ba0c 30533 r = kmalloc(sizeof(*r), GFP_KERNEL);
fe2de317 30534diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
4c928ab7 30535index 855ab3f..11f4bbd 100644
fe2de317
MT
30536--- a/drivers/firewire/core-transaction.c
30537+++ b/drivers/firewire/core-transaction.c
15a11c5b 30538@@ -37,6 +37,7 @@
66a7e928
MT
30539 #include <linux/timer.h>
30540 #include <linux/types.h>
15a11c5b 30541 #include <linux/workqueue.h>
66a7e928
MT
30542+#include <linux/sched.h>
30543
30544 #include <asm/byteorder.h>
30545
fe2de317
MT
30546diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30547index b45be57..5fad18b 100644
30548--- a/drivers/firewire/core.h
30549+++ b/drivers/firewire/core.h
30550@@ -101,6 +101,7 @@ struct fw_card_driver {
30551
30552 int (*stop_iso)(struct fw_iso_context *ctx);
30553 };
30554+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30555
30556 void fw_card_initialize(struct fw_card *card,
30557 const struct fw_card_driver *driver, struct device *device);
30558diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
4c928ab7 30559index 153980b..4b4d046 100644
fe2de317
MT
30560--- a/drivers/firmware/dmi_scan.c
30561+++ b/drivers/firmware/dmi_scan.c
c52201e0 30562@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
58c5fc13
MT
30563 }
30564 }
30565 else {
30566- /*
30567- * no iounmap() for that ioremap(); it would be a no-op, but
30568- * it's so early in setup that sucker gets confused into doing
30569- * what it shouldn't if we actually call it.
30570- */
30571 p = dmi_ioremap(0xF0000, 0x10000);
30572 if (p == NULL)
30573 goto error;
4c928ab7 30574@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
6e9df6a3
MT
30575 if (buf == NULL)
30576 return -1;
30577
30578- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30579+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30580
30581 iounmap(buf);
30582 return 0;
fe2de317 30583diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
5e856224 30584index 82d5c20..44a7177 100644
fe2de317
MT
30585--- a/drivers/gpio/gpio-vr41xx.c
30586+++ b/drivers/gpio/gpio-vr41xx.c
8308f9c9
MT
30587@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30588 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30589 maskl, pendl, maskh, pendh);
30590
30591- atomic_inc(&irq_err_count);
30592+ atomic_inc_unchecked(&irq_err_count);
30593
30594 return -EINVAL;
30595 }
fe2de317 30596diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
5e856224 30597index 84a4a80..ce0306e 100644
fe2de317
MT
30598--- a/drivers/gpu/drm/drm_crtc_helper.c
30599+++ b/drivers/gpu/drm/drm_crtc_helper.c
5e856224 30600@@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
6892158b
MT
30601 struct drm_crtc *tmp;
30602 int crtc_mask = 1;
30603
bc901d79 30604- WARN(!crtc, "checking null crtc?\n");
6892158b
MT
30605+ BUG_ON(!crtc);
30606
30607 dev = crtc->dev;
30608
fe2de317 30609diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
5e856224 30610index ebf7d3f..d64c436 100644
fe2de317
MT
30611--- a/drivers/gpu/drm/drm_drv.c
30612+++ b/drivers/gpu/drm/drm_drv.c
5e856224 30613@@ -312,7 +312,7 @@ module_exit(drm_core_exit);
6e9df6a3
MT
30614 /**
30615 * Copy and IOCTL return string to user space
30616 */
30617-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30618+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30619 {
30620 int len;
30621
5e856224 30622@@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
ae4e228f
MT
30623
30624 dev = file_priv->minor->dev;
58c5fc13
MT
30625 atomic_inc(&dev->ioctl_count);
30626- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30627+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30628 ++file_priv->ioctl_count;
30629
30630 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
fe2de317 30631diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
5e856224 30632index 6263b01..7987f55 100644
fe2de317
MT
30633--- a/drivers/gpu/drm/drm_fops.c
30634+++ b/drivers/gpu/drm/drm_fops.c
4c928ab7 30635@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
58c5fc13
MT
30636 }
30637
30638 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30639- atomic_set(&dev->counts[i], 0);
30640+ atomic_set_unchecked(&dev->counts[i], 0);
30641
30642 dev->sigdata.lock = NULL;
30643
4c928ab7 30644@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
58c5fc13
MT
30645
30646 retcode = drm_open_helper(inode, filp, dev);
30647 if (!retcode) {
30648- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
6892158b 30649- if (!dev->open_count++)
58c5fc13 30650+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
c52201e0 30651+ if (local_inc_return(&dev->open_count) == 1)
58c5fc13 30652 retcode = drm_setup(dev);
6892158b
MT
30653 }
30654 if (!retcode) {
4c928ab7 30655@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13 30656
6892158b 30657 mutex_lock(&drm_global_mutex);
58c5fc13
MT
30658
30659- DRM_DEBUG("open_count = %d\n", dev->open_count);
5e856224 30660+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
58c5fc13
MT
30661
30662 if (dev->driver->preclose)
30663 dev->driver->preclose(dev, file_priv);
5e856224
MT
30664@@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
30665 * Begin inline drm_release
30666 */
30667
30668- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30669+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
58c5fc13
MT
30670 task_pid_nr(current),
30671 (long)old_encode_dev(file_priv->minor->device),
30672- dev->open_count);
c52201e0 30673+ local_read(&dev->open_count));
58c5fc13 30674
4c928ab7
MT
30675 /* Release any auth tokens that might point to this file_priv,
30676 (do that under the drm_global_mutex) */
30677@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13
MT
30678 * End inline drm_release
30679 */
30680
30681- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
58c5fc13 30682- if (!--dev->open_count) {
6892158b 30683+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
c52201e0 30684+ if (local_dec_and_test(&dev->open_count)) {
58c5fc13
MT
30685 if (atomic_read(&dev->ioctl_count)) {
30686 DRM_ERROR("Device busy: %d\n",
30687 atomic_read(&dev->ioctl_count));
fe2de317
MT
30688diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30689index c87dc96..326055d 100644
30690--- a/drivers/gpu/drm/drm_global.c
30691+++ b/drivers/gpu/drm/drm_global.c
6892158b
MT
30692@@ -36,7 +36,7 @@
30693 struct drm_global_item {
30694 struct mutex mutex;
30695 void *object;
30696- int refcount;
30697+ atomic_t refcount;
30698 };
30699
30700 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30701@@ -49,7 +49,7 @@ void drm_global_init(void)
30702 struct drm_global_item *item = &glob[i];
30703 mutex_init(&item->mutex);
30704 item->object = NULL;
30705- item->refcount = 0;
30706+ atomic_set(&item->refcount, 0);
30707 }
30708 }
30709
30710@@ -59,7 +59,7 @@ void drm_global_release(void)
30711 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30712 struct drm_global_item *item = &glob[i];
30713 BUG_ON(item->object != NULL);
30714- BUG_ON(item->refcount != 0);
30715+ BUG_ON(atomic_read(&item->refcount) != 0);
30716 }
30717 }
30718
fe2de317 30719@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
30720 void *object;
30721
30722 mutex_lock(&item->mutex);
30723- if (item->refcount == 0) {
30724+ if (atomic_read(&item->refcount) == 0) {
30725 item->object = kzalloc(ref->size, GFP_KERNEL);
30726 if (unlikely(item->object == NULL)) {
30727 ret = -ENOMEM;
fe2de317 30728@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
30729 goto out_err;
30730
30731 }
30732- ++item->refcount;
30733+ atomic_inc(&item->refcount);
30734 ref->object = item->object;
30735 object = item->object;
30736 mutex_unlock(&item->mutex);
fe2de317 30737@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
6892158b
MT
30738 struct drm_global_item *item = &glob[ref->global_type];
30739
30740 mutex_lock(&item->mutex);
30741- BUG_ON(item->refcount == 0);
30742+ BUG_ON(atomic_read(&item->refcount) == 0);
30743 BUG_ON(ref->object != item->object);
30744- if (--item->refcount == 0) {
30745+ if (atomic_dec_and_test(&item->refcount)) {
30746 ref->release(ref);
30747 item->object = NULL;
30748 }
fe2de317
MT
30749diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30750index ab1162d..42587b2 100644
30751--- a/drivers/gpu/drm/drm_info.c
30752+++ b/drivers/gpu/drm/drm_info.c
30753@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
30754 struct drm_local_map *map;
30755 struct drm_map_list *r_list;
30756
30757- /* Hardcoded from _DRM_FRAME_BUFFER,
30758- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30759- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30760- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30761+ static const char * const types[] = {
30762+ [_DRM_FRAME_BUFFER] = "FB",
30763+ [_DRM_REGISTERS] = "REG",
30764+ [_DRM_SHM] = "SHM",
30765+ [_DRM_AGP] = "AGP",
30766+ [_DRM_SCATTER_GATHER] = "SG",
30767+ [_DRM_CONSISTENT] = "PCI",
30768+ [_DRM_GEM] = "GEM" };
30769 const char *type;
30770 int i;
30771
fe2de317 30772@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
30773 map = r_list->map;
30774 if (!map)
30775 continue;
30776- if (map->type < 0 || map->type > 5)
30777+ if (map->type >= ARRAY_SIZE(types))
30778 type = "??";
30779 else
30780 type = types[map->type];
fe2de317 30781@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
16454cff
MT
30782 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30783 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30784 vma->vm_flags & VM_IO ? 'i' : '-',
30785+#ifdef CONFIG_GRKERNSEC_HIDESYM
30786+ 0);
30787+#else
30788 vma->vm_pgoff);
30789+#endif
30790
30791 #if defined(__i386__)
30792 pgprot = pgprot_val(vma->vm_page_prot);
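
The drm_info.c hunks above replace a positional string array plus a hard-coded "type > 5" check with designated initializers indexed by the _DRM_* map types and a bound taken from ARRAY_SIZE(), so adding a map type can no longer read past the table (and _DRM_GEM finally gets a name instead of falling into the PCI slot). The same idea in a small self-contained form, with invented names:

#include <linux/kernel.h>	/* ARRAY_SIZE */

enum example_map_type { EX_FB, EX_REG, EX_SHM, EX_GEM = 7 };

static const char * const example_type_names[] = {
	[EX_FB]  = "FB",
	[EX_REG] = "REG",
	[EX_SHM] = "SHM",
	[EX_GEM] = "GEM",
};

static const char *example_type_name(unsigned int type)
{
	/* out-of-range indices and gaps in the table both map to "??" */
	if (type >= ARRAY_SIZE(example_type_names) || !example_type_names[type])
		return "??";
	return example_type_names[type];
}
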
fe2de317 30793diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
5e856224 30794index 637fcc3..e890b33 100644
fe2de317
MT
30795--- a/drivers/gpu/drm/drm_ioc32.c
30796+++ b/drivers/gpu/drm/drm_ioc32.c
5e856224 30797@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
30798 request = compat_alloc_user_space(nbytes);
30799 if (!access_ok(VERIFY_WRITE, request, nbytes))
30800 return -EFAULT;
30801- list = (struct drm_buf_desc *) (request + 1);
30802+ list = (struct drm_buf_desc __user *) (request + 1);
30803
30804 if (__put_user(count, &request->count)
30805 || __put_user(list, &request->list))
5e856224 30806@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
30807 request = compat_alloc_user_space(nbytes);
30808 if (!access_ok(VERIFY_WRITE, request, nbytes))
30809 return -EFAULT;
30810- list = (struct drm_buf_pub *) (request + 1);
30811+ list = (struct drm_buf_pub __user *) (request + 1);
30812
30813 if (__put_user(count, &request->count)
30814 || __put_user(list, &request->list))
fe2de317 30815diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
5e856224 30816index 956fd38..e52167a 100644
fe2de317
MT
30817--- a/drivers/gpu/drm/drm_ioctl.c
30818+++ b/drivers/gpu/drm/drm_ioctl.c
5e856224 30819@@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
58c5fc13
MT
30820 stats->data[i].value =
30821 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30822 else
30823- stats->data[i].value = atomic_read(&dev->counts[i]);
30824+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30825 stats->data[i].type = dev->types[i];
30826 }
30827
fe2de317 30828diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
5e856224 30829index c79c713..2048588 100644
fe2de317
MT
30830--- a/drivers/gpu/drm/drm_lock.c
30831+++ b/drivers/gpu/drm/drm_lock.c
5e856224 30832@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
30833 if (drm_lock_take(&master->lock, lock->context)) {
30834 master->lock.file_priv = file_priv;
30835 master->lock.lock_time = jiffies;
30836- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30837+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30838 break; /* Got lock */
30839 }
30840
5e856224 30841@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
30842 return -EINVAL;
30843 }
30844
30845- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30846+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30847
bc901d79
MT
30848 if (drm_lock_free(&master->lock, lock->context)) {
30849 /* FIXME: Should really bail out here. */
fe2de317 30850diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
5e856224 30851index 7f4b4e1..bf4def2 100644
fe2de317
MT
30852--- a/drivers/gpu/drm/i810/i810_dma.c
30853+++ b/drivers/gpu/drm/i810/i810_dma.c
5e856224 30854@@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
58c5fc13
MT
30855 dma->buflist[vertex->idx],
30856 vertex->discard, vertex->used);
30857
30858- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30859- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30860+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30861+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30862 sarea_priv->last_enqueue = dev_priv->counter - 1;
30863 sarea_priv->last_dispatch = (int)hw_status[5];
30864
5e856224 30865@@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
58c5fc13
MT
30866 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30867 mc->last_render);
30868
30869- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30870- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30871+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30872+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30873 sarea_priv->last_enqueue = dev_priv->counter - 1;
30874 sarea_priv->last_dispatch = (int)hw_status[5];
30875
fe2de317
MT
30876diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30877index c9339f4..f5e1b9d 100644
30878--- a/drivers/gpu/drm/i810/i810_drv.h
30879+++ b/drivers/gpu/drm/i810/i810_drv.h
8308f9c9
MT
30880@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30881 int page_flipping;
30882
30883 wait_queue_head_t irq_queue;
30884- atomic_t irq_received;
30885- atomic_t irq_emitted;
30886+ atomic_unchecked_t irq_received;
30887+ atomic_unchecked_t irq_emitted;
30888
30889 int front_offset;
30890 } drm_i810_private_t;
fe2de317 30891diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
5e856224 30892index deaa657..e0fd296 100644
fe2de317
MT
30893--- a/drivers/gpu/drm/i915/i915_debugfs.c
30894+++ b/drivers/gpu/drm/i915/i915_debugfs.c
4c928ab7 30895@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
8308f9c9
MT
30896 I915_READ(GTIMR));
30897 }
30898 seq_printf(m, "Interrupts received: %d\n",
30899- atomic_read(&dev_priv->irq_received));
30900+ atomic_read_unchecked(&dev_priv->irq_received));
30901 for (i = 0; i < I915_NUM_RINGS; i++) {
6e9df6a3 30902 if (IS_GEN6(dev) || IS_GEN7(dev)) {
8308f9c9 30903 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
5e856224 30904@@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
6e9df6a3
MT
30905 return ret;
30906
30907 if (opregion->header)
30908- seq_write(m, opregion->header, OPREGION_SIZE);
30909+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30910
30911 mutex_unlock(&dev->struct_mutex);
30912
fe2de317 30913diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
5e856224 30914index ddfe3d9..f6e6b21 100644
fe2de317
MT
30915--- a/drivers/gpu/drm/i915/i915_dma.c
30916+++ b/drivers/gpu/drm/i915/i915_dma.c
5e856224 30917@@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
30918 bool can_switch;
30919
30920 spin_lock(&dev->count_lock);
30921- can_switch = (dev->open_count == 0);
c52201e0 30922+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
30923 spin_unlock(&dev->count_lock);
30924 return can_switch;
30925 }
fe2de317 30926diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
5e856224 30927index 9689ca3..294f9c1 100644
fe2de317
MT
30928--- a/drivers/gpu/drm/i915/i915_drv.h
30929+++ b/drivers/gpu/drm/i915/i915_drv.h
5e856224 30930@@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
15a11c5b
MT
30931 /* render clock increase/decrease */
30932 /* display clock increase/decrease */
30933 /* pll clock increase/decrease */
30934-};
30935+} __no_const;
30936
30937 struct intel_device_info {
30938 u8 gen;
5e856224 30939@@ -320,7 +320,7 @@ typedef struct drm_i915_private {
8308f9c9
MT
30940 int current_page;
30941 int page_flipping;
30942
30943- atomic_t irq_received;
30944+ atomic_unchecked_t irq_received;
8308f9c9
MT
30945
30946 /* protects the irq masks */
66a7e928 30947 spinlock_t irq_lock;
5e856224 30948@@ -896,7 +896,7 @@ struct drm_i915_gem_object {
8308f9c9
MT
30949 * will be page flipped away on the next vblank. When it
30950 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30951 */
30952- atomic_t pending_flip;
30953+ atomic_unchecked_t pending_flip;
30954 };
30955
30956 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
5e856224 30957@@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
71d190be
MT
30958 extern void intel_teardown_gmbus(struct drm_device *dev);
30959 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30960 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30961-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30962+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30963 {
30964 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30965 }
fe2de317 30966diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
5e856224 30967index e159e33..cdcc663 100644
fe2de317
MT
30968--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30969+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4c928ab7 30970@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
15a11c5b 30971 i915_gem_clflush_object(obj);
66a7e928
MT
30972
30973 if (obj->base.pending_write_domain)
30974- cd->flips |= atomic_read(&obj->pending_flip);
30975+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30976
30977 /* The actual obj->write_domain will be updated with
30978 * pending_write_domain after we emit the accumulated flush for all
4c928ab7 30979@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
fe2de317
MT
30980
30981 static int
30982 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30983- int count)
30984+ unsigned int count)
30985 {
30986- int i;
30987+ unsigned int i;
30988
30989 for (i = 0; i < count; i++) {
30990 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30991diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
5e856224 30992index 5bd4361..0241a42 100644
fe2de317
MT
30993--- a/drivers/gpu/drm/i915/i915_irq.c
30994+++ b/drivers/gpu/drm/i915/i915_irq.c
30995@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
30996 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30997 struct drm_i915_master_private *master_priv;
30998
30999- atomic_inc(&dev_priv->irq_received);
31000+ atomic_inc_unchecked(&dev_priv->irq_received);
31001
31002 /* disable master interrupt before clearing iir */
31003 de_ier = I915_READ(DEIER);
4c928ab7 31004@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
31005 struct drm_i915_master_private *master_priv;
31006 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
31007
31008- atomic_inc(&dev_priv->irq_received);
31009+ atomic_inc_unchecked(&dev_priv->irq_received);
31010
31011 if (IS_GEN6(dev))
31012 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
4c928ab7 31013@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
66a7e928
MT
31014 int ret = IRQ_NONE, pipe;
31015 bool blc_event = false;
8308f9c9
MT
31016
31017- atomic_inc(&dev_priv->irq_received);
31018+ atomic_inc_unchecked(&dev_priv->irq_received);
31019
15a11c5b
MT
31020 iir = I915_READ(IIR);
31021
5e856224 31022@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
15a11c5b
MT
31023 {
31024 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
31025
31026- atomic_set(&dev_priv->irq_received, 0);
31027+ atomic_set_unchecked(&dev_priv->irq_received, 0);
31028
31029 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
31030 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
5e856224 31031@@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
8308f9c9 31032 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
66a7e928 31033 int pipe;
8308f9c9
MT
31034
31035- atomic_set(&dev_priv->irq_received, 0);
31036+ atomic_set_unchecked(&dev_priv->irq_received, 0);
31037
31038 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
31039 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
fe2de317 31040diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
5e856224 31041index 2163818..cede019 100644
fe2de317
MT
31042--- a/drivers/gpu/drm/i915/intel_display.c
31043+++ b/drivers/gpu/drm/i915/intel_display.c
5e856224 31044@@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
8308f9c9
MT
31045
31046 wait_event(dev_priv->pending_flip_queue,
31047 atomic_read(&dev_priv->mm.wedged) ||
31048- atomic_read(&obj->pending_flip) == 0);
31049+ atomic_read_unchecked(&obj->pending_flip) == 0);
31050
31051 /* Big Hammer, we also need to ensure that any pending
31052 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
5e856224 31053@@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
8308f9c9
MT
31054 obj = to_intel_framebuffer(crtc->fb)->obj;
31055 dev_priv = crtc->dev->dev_private;
31056 wait_event(dev_priv->pending_flip_queue,
31057- atomic_read(&obj->pending_flip) == 0);
31058+ atomic_read_unchecked(&obj->pending_flip) == 0);
31059 }
31060
31061 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
5e856224 31062@@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8308f9c9
MT
31063
31064 atomic_clear_mask(1 << intel_crtc->plane,
31065 &obj->pending_flip.counter);
31066- if (atomic_read(&obj->pending_flip) == 0)
31067+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
31068 wake_up(&dev_priv->pending_flip_queue);
31069
31070 schedule_work(&work->work);
5e856224
MT
31071@@ -7354,7 +7354,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
31072 OUT_RING(fb->pitches[0] | obj->tiling_mode);
31073 OUT_RING(obj->gtt_offset);
31074
31075- pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
31076+ /* Contrary to the suggestions in the documentation,
31077+ * "Enable Panel Fitter" does not seem to be required when page
31078+ * flipping with a non-native mode, and worse causes a normal
31079+ * modeset to fail.
31080+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
31081+ */
31082+ pf = 0;
31083 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
31084 OUT_RING(pf | pipesrc);
31085 ADVANCE_LP_RING();
31086@@ -7461,7 +7467,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8308f9c9
MT
31087 /* Block clients from rendering to the new back buffer until
31088 * the flip occurs and the object is no longer visible.
31089 */
31090- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31091+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31092
15a11c5b
MT
31093 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
31094 if (ret)
5e856224 31095@@ -7475,7 +7481,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15a11c5b
MT
31096 return 0;
31097
31098 cleanup_pending:
31099- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31100+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
15a11c5b
MT
31101 drm_gem_object_unreference(&work->old_fb_obj->base);
31102 drm_gem_object_unreference(&obj->base);
4c928ab7 31103 mutex_unlock(&dev->struct_mutex);
fe2de317
MT
31104diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
31105index 54558a0..2d97005 100644
31106--- a/drivers/gpu/drm/mga/mga_drv.h
31107+++ b/drivers/gpu/drm/mga/mga_drv.h
8308f9c9
MT
31108@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
31109 u32 clear_cmd;
31110 u32 maccess;
31111
31112- atomic_t vbl_received; /**< Number of vblanks received. */
31113+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
31114 wait_queue_head_t fence_queue;
31115- atomic_t last_fence_retired;
31116+ atomic_unchecked_t last_fence_retired;
31117 u32 next_fence_to_post;
31118
31119 unsigned int fb_cpp;
fe2de317
MT
31120diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
31121index 2581202..f230a8d9 100644
31122--- a/drivers/gpu/drm/mga/mga_irq.c
31123+++ b/drivers/gpu/drm/mga/mga_irq.c
31124@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
31125 if (crtc != 0)
31126 return 0;
31127
31128- return atomic_read(&dev_priv->vbl_received);
31129+ return atomic_read_unchecked(&dev_priv->vbl_received);
31130 }
31131
31132
fe2de317 31133@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31134 /* VBLANK interrupt */
31135 if (status & MGA_VLINEPEN) {
31136 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
31137- atomic_inc(&dev_priv->vbl_received);
31138+ atomic_inc_unchecked(&dev_priv->vbl_received);
31139 drm_handle_vblank(dev, 0);
31140 handled = 1;
31141 }
fe2de317 31142@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31143 if ((prim_start & ~0x03) != (prim_end & ~0x03))
31144 MGA_WRITE(MGA_PRIMEND, prim_end);
31145
31146- atomic_inc(&dev_priv->last_fence_retired);
31147+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
31148 DRM_WAKEUP(&dev_priv->fence_queue);
31149 handled = 1;
31150 }
fe2de317 31151@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
8308f9c9
MT
31152 * using fences.
31153 */
31154 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
31155- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
31156+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
31157 - *sequence) <= (1 << 23)));
31158
31159 *sequence = cur_fence;
fe2de317 31160diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
5e856224 31161index e5cbead..6c354a3 100644
fe2de317
MT
31162--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
31163+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
5e856224 31164@@ -199,7 +199,7 @@ struct methods {
15a11c5b
MT
31165 const char desc[8];
31166 void (*loadbios)(struct drm_device *, uint8_t *);
31167 const bool rw;
31168-};
31169+} __do_const;
31170
31171 static struct methods shadow_methods[] = {
31172 { "PRAMIN", load_vbios_pramin, true },
5e856224 31173@@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
15a11c5b
MT
31174 struct bit_table {
31175 const char id;
31176 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
31177-};
31178+} __no_const;
31179
31180 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
31181
fe2de317 31182diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
5e856224 31183index b827098..c31a797 100644
fe2de317
MT
31184--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
31185+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
5e856224 31186@@ -242,7 +242,7 @@ struct nouveau_channel {
8308f9c9
MT
31187 struct list_head pending;
31188 uint32_t sequence;
31189 uint32_t sequence_ack;
31190- atomic_t last_sequence_irq;
31191+ atomic_unchecked_t last_sequence_irq;
6e9df6a3 31192 struct nouveau_vma vma;
8308f9c9
MT
31193 } fence;
31194
5e856224 31195@@ -323,7 +323,7 @@ struct nouveau_exec_engine {
15a11c5b
MT
31196 u32 handle, u16 class);
31197 void (*set_tile_region)(struct drm_device *dev, int i);
31198 void (*tlb_flush)(struct drm_device *, int engine);
31199-};
31200+} __no_const;
31201
31202 struct nouveau_instmem_engine {
31203 void *priv;
5e856224 31204@@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
15a11c5b
MT
31205 struct nouveau_mc_engine {
31206 int (*init)(struct drm_device *dev);
31207 void (*takedown)(struct drm_device *dev);
31208-};
31209+} __no_const;
31210
31211 struct nouveau_timer_engine {
31212 int (*init)(struct drm_device *dev);
31213 void (*takedown)(struct drm_device *dev);
31214 uint64_t (*read)(struct drm_device *dev);
31215-};
31216+} __no_const;
31217
31218 struct nouveau_fb_engine {
31219 int num_tiles;
5e856224 31220@@ -566,7 +566,7 @@ struct nouveau_vram_engine {
15a11c5b
MT
31221 void (*put)(struct drm_device *, struct nouveau_mem **);
31222
31223 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
31224-};
31225+} __no_const;
66a7e928
MT
31226
31227 struct nouveau_engine {
31228 struct nouveau_instmem_engine instmem;
5e856224 31229@@ -714,7 +714,7 @@ struct drm_nouveau_private {
8308f9c9
MT
31230 struct drm_global_reference mem_global_ref;
31231 struct ttm_bo_global_ref bo_global_ref;
31232 struct ttm_bo_device bdev;
31233- atomic_t validate_sequence;
31234+ atomic_unchecked_t validate_sequence;
31235 } ttm;
31236
31237 struct {
fe2de317 31238diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
4c928ab7 31239index 2f6daae..c9d7b9e 100644
fe2de317
MT
31240--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
31241+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
31242@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
8308f9c9
MT
31243 if (USE_REFCNT(dev))
31244 sequence = nvchan_rd32(chan, 0x48);
31245 else
31246- sequence = atomic_read(&chan->fence.last_sequence_irq);
31247+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
31248
31249 if (chan->fence.sequence_ack == sequence)
31250 goto out;
fe2de317 31251@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
883a9837
MT
31252 return ret;
31253 }
15a11c5b 31254
8308f9c9
MT
31255- atomic_set(&chan->fence.last_sequence_irq, 0);
31256+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
8308f9c9
MT
31257 return 0;
31258 }
66a7e928 31259
fe2de317 31260diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
4c928ab7 31261index 7ce3fde..cb3ea04 100644
fe2de317
MT
31262--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
31263+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
31264@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
8308f9c9
MT
31265 int trycnt = 0;
31266 int ret, i;
31267
31268- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
31269+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
31270 retry:
31271 if (++trycnt > 100000) {
31272 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
fe2de317 31273diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
5e856224 31274index f80c5e0..936baa7 100644
fe2de317
MT
31275--- a/drivers/gpu/drm/nouveau/nouveau_state.c
31276+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
5e856224 31277@@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
31278 bool can_switch;
31279
31280 spin_lock(&dev->count_lock);
31281- can_switch = (dev->open_count == 0);
c52201e0 31282+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
31283 spin_unlock(&dev->count_lock);
31284 return can_switch;
31285 }
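Here dev->open_count is read through local_read(), which only makes sense because the DRM core's open_count field is converted from a plain int to a local_t elsewhere in this patch (the drmP.h/drm_fops.c hunks, not shown here); every reader has to follow suit. A minimal sketch of the accessor, under that assumption:

/* Sketch only: assumes struct drm_device.open_count has been converted
 * from int to local_t elsewhere in this patch. */
#include <asm/local.h>
#include <drm/drmP.h>

static bool drm_device_is_idle(struct drm_device *dev)
{
	bool idle;

	spin_lock(&dev->count_lock);
	idle = (local_read(&dev->open_count) == 0);
	spin_unlock(&dev->count_lock);
	return idle;
}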
fe2de317
MT
31286diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
31287index dbdea8e..cd6eeeb 100644
31288--- a/drivers/gpu/drm/nouveau/nv04_graph.c
31289+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
6e9df6a3 31290@@ -554,7 +554,7 @@ static int
8308f9c9
MT
31291 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
31292 u32 class, u32 mthd, u32 data)
31293 {
31294- atomic_set(&chan->fence.last_sequence_irq, data);
31295+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
31296 return 0;
31297 }
31298
fe2de317 31299diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
4c928ab7 31300index bcac90b..53bfc76 100644
fe2de317
MT
31301--- a/drivers/gpu/drm/r128/r128_cce.c
31302+++ b/drivers/gpu/drm/r128/r128_cce.c
4c928ab7 31303@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
8308f9c9
MT
31304
31305 /* GH: Simple idle check.
31306 */
31307- atomic_set(&dev_priv->idle_count, 0);
31308+ atomic_set_unchecked(&dev_priv->idle_count, 0);
31309
31310 /* We don't support anything other than bus-mastering ring mode,
31311 * but the ring can be in either AGP or PCI space for the ring
fe2de317
MT
31312diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
31313index 930c71b..499aded 100644
31314--- a/drivers/gpu/drm/r128/r128_drv.h
31315+++ b/drivers/gpu/drm/r128/r128_drv.h
8308f9c9
MT
31316@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
31317 int is_pci;
31318 unsigned long cce_buffers_offset;
31319
31320- atomic_t idle_count;
31321+ atomic_unchecked_t idle_count;
31322
31323 int page_flipping;
31324 int current_page;
31325 u32 crtc_offset;
31326 u32 crtc_offset_cntl;
31327
31328- atomic_t vbl_received;
31329+ atomic_unchecked_t vbl_received;
31330
31331 u32 color_fmt;
31332 unsigned int front_offset;
fe2de317
MT
31333diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
31334index 429d5a0..7e899ed 100644
31335--- a/drivers/gpu/drm/r128/r128_irq.c
31336+++ b/drivers/gpu/drm/r128/r128_irq.c
31337@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
31338 if (crtc != 0)
31339 return 0;
31340
31341- return atomic_read(&dev_priv->vbl_received);
31342+ return atomic_read_unchecked(&dev_priv->vbl_received);
31343 }
31344
31345 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 31346@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31347 /* VBLANK interrupt */
31348 if (status & R128_CRTC_VBLANK_INT) {
31349 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
31350- atomic_inc(&dev_priv->vbl_received);
31351+ atomic_inc_unchecked(&dev_priv->vbl_received);
31352 drm_handle_vblank(dev, 0);
31353 return IRQ_HANDLED;
31354 }
fe2de317
MT
31355diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
31356index a9e33ce..09edd4b 100644
31357--- a/drivers/gpu/drm/r128/r128_state.c
31358+++ b/drivers/gpu/drm/r128/r128_state.c
31359@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
8308f9c9
MT
31360
31361 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
31362 {
31363- if (atomic_read(&dev_priv->idle_count) == 0)
31364+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
31365 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
31366 else
31367- atomic_set(&dev_priv->idle_count, 0);
31368+ atomic_set_unchecked(&dev_priv->idle_count, 0);
31369 }
31370
31371 #endif
fe2de317
MT
31372diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31373index 5a82b6b..9e69c73 100644
31374--- a/drivers/gpu/drm/radeon/mkregtable.c
31375+++ b/drivers/gpu/drm/radeon/mkregtable.c
31376@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
ae4e228f
MT
31377 regex_t mask_rex;
31378 regmatch_t match[4];
31379 char buf[1024];
31380- size_t end;
31381+ long end;
31382 int len;
31383 int done = 0;
31384 int r;
31385 unsigned o;
31386 struct offset *offset;
31387 char last_reg_s[10];
31388- int last_reg;
31389+ unsigned long last_reg;
31390
31391 if (regcomp
31392 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
fe2de317 31393diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
5e856224 31394index 1668ec1..30ebdab 100644
fe2de317
MT
31395--- a/drivers/gpu/drm/radeon/radeon.h
31396+++ b/drivers/gpu/drm/radeon/radeon.h
5e856224 31397@@ -250,7 +250,7 @@ struct radeon_fence_driver {
fe2de317 31398 uint32_t scratch_reg;
5e856224
MT
31399 uint64_t gpu_addr;
31400 volatile uint32_t *cpu_addr;
fe2de317
MT
31401- atomic_t seq;
31402+ atomic_unchecked_t seq;
31403 uint32_t last_seq;
31404 unsigned long last_jiffies;
31405 unsigned long last_timeout;
5e856224 31406@@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
4c928ab7
MT
31407 int x2, int y2);
31408 void (*draw_auto)(struct radeon_device *rdev);
31409 void (*set_default_state)(struct radeon_device *rdev);
31410-};
31411+} __no_const;
31412
31413 struct r600_blit {
31414 struct mutex mutex;
5e856224 31415@@ -1201,7 +1201,7 @@ struct radeon_asic {
fe2de317
MT
31416 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
31417 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31418 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31419-};
31420+} __no_const;
31421
31422 /*
31423 * Asic structures
fe2de317 31424diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
5e856224 31425index 49f7cb7..2fcb48f 100644
fe2de317
MT
31426--- a/drivers/gpu/drm/radeon/radeon_device.c
31427+++ b/drivers/gpu/drm/radeon/radeon_device.c
4c928ab7 31428@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
df50ba0c 31429 bool can_switch;
58c5fc13 31430
df50ba0c
MT
31431 spin_lock(&dev->count_lock);
31432- can_switch = (dev->open_count == 0);
c52201e0 31433+ can_switch = (local_read(&dev->open_count) == 0);
df50ba0c
MT
31434 spin_unlock(&dev->count_lock);
31435 return can_switch;
31436 }
fe2de317
MT
31437diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31438index a1b59ca..86f2d44 100644
31439--- a/drivers/gpu/drm/radeon/radeon_drv.h
31440+++ b/drivers/gpu/drm/radeon/radeon_drv.h
8308f9c9
MT
31441@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31442
31443 /* SW interrupt */
31444 wait_queue_head_t swi_queue;
31445- atomic_t swi_emitted;
31446+ atomic_unchecked_t swi_emitted;
31447 int vblank_crtc;
31448 uint32_t irq_enable_reg;
31449 uint32_t r500_disp_irq_reg;
fe2de317 31450diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
5e856224 31451index 4bd36a3..e66fe9c 100644
fe2de317
MT
31452--- a/drivers/gpu/drm/radeon/radeon_fence.c
31453+++ b/drivers/gpu/drm/radeon/radeon_fence.c
5e856224
MT
31454@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
31455 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
8308f9c9
MT
31456 return 0;
31457 }
5e856224
MT
31458- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
31459+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
31460 if (!rdev->ring[fence->ring].ready)
8308f9c9
MT
31461 /* FIXME: cp is not running assume everythings is done right
31462 * away
5e856224
MT
31463@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
31464 }
31465 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
31466 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
31467- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
31468+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
31469 rdev->fence_drv[ring].initialized = true;
31470 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
31471 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
31472@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
31473 rdev->fence_drv[ring].scratch_reg = -1;
31474 rdev->fence_drv[ring].cpu_addr = NULL;
31475 rdev->fence_drv[ring].gpu_addr = 0;
31476- atomic_set(&rdev->fence_drv[ring].seq, 0);
31477+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
31478 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
31479 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
31480 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
fe2de317
MT
31481diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31482index 48b7cea..342236f 100644
31483--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31484+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31485@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
71d190be
MT
31486 request = compat_alloc_user_space(sizeof(*request));
31487 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31488 || __put_user(req32.param, &request->param)
31489- || __put_user((void __user *)(unsigned long)req32.value,
31490+ || __put_user((unsigned long)req32.value,
31491 &request->value))
31492 return -EFAULT;
31493
fe2de317 31494diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
4c928ab7 31495index 00da384..32f972d 100644
fe2de317
MT
31496--- a/drivers/gpu/drm/radeon/radeon_irq.c
31497+++ b/drivers/gpu/drm/radeon/radeon_irq.c
31498@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
8308f9c9
MT
31499 unsigned int ret;
31500 RING_LOCALS;
31501
31502- atomic_inc(&dev_priv->swi_emitted);
31503- ret = atomic_read(&dev_priv->swi_emitted);
31504+ atomic_inc_unchecked(&dev_priv->swi_emitted);
31505+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31506
31507 BEGIN_RING(4);
31508 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
fe2de317 31509@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
8308f9c9
MT
31510 drm_radeon_private_t *dev_priv =
31511 (drm_radeon_private_t *) dev->dev_private;
31512
31513- atomic_set(&dev_priv->swi_emitted, 0);
31514+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31515 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31516
31517 dev->max_vblank_count = 0x001fffff;
fe2de317 31518diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
4c928ab7 31519index e8422ae..d22d4a8 100644
fe2de317
MT
31520--- a/drivers/gpu/drm/radeon/radeon_state.c
31521+++ b/drivers/gpu/drm/radeon/radeon_state.c
31522@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
ae4e228f
MT
31523 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31524 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31525
31526- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31527+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31528 sarea_priv->nbox * sizeof(depth_boxes[0])))
31529 return -EFAULT;
31530
fe2de317 31531@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
58c5fc13
MT
31532 {
31533 drm_radeon_private_t *dev_priv = dev->dev_private;
31534 drm_radeon_getparam_t *param = data;
31535- int value;
31536+ int value = 0;
31537
31538 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31539
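The two radeon_state.c hunks above are robustness fixes rather than annotations: the clear path re-checks the user-visible nbox count in the same expression that sizes the DRM_COPY_FROM_USER into a fixed-size on-stack array (sarea_priv lives in a mapping shared with user space, so the value can change after the earlier clamp), and getparam starts from value = 0, presumably so an unhandled parameter cannot hand uninitialised stack memory back to user space. A small sketch of the first idiom, with illustrative names rather than the driver's:

/* Sketch only: validates a count that user space can influence in the
 * same expression that sizes the copy into a fixed on-stack buffer. */
#include <linux/errno.h>
#include <linux/uaccess.h>

#define MAX_BOXES 12

struct box { int x1, y1, x2, y2; };

static int copy_boxes(const void __user *src, unsigned int nbox)
{
	struct box boxes[MAX_BOXES];

	if (nbox > MAX_BOXES ||
	    copy_from_user(boxes, src, nbox * sizeof(boxes[0])))
		return -EFAULT;
	/* ... use boxes[0..nbox-1] ... */
	return 0;
}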
fe2de317 31540diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
5e856224 31541index c421e77..e6bf2e8 100644
fe2de317
MT
31542--- a/drivers/gpu/drm/radeon/radeon_ttm.c
31543+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
5e856224 31544@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
15a11c5b
MT
31545 }
31546 if (unlikely(ttm_vm_ops == NULL)) {
31547 ttm_vm_ops = vma->vm_ops;
58c5fc13
MT
31548- radeon_ttm_vm_ops = *ttm_vm_ops;
31549- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
15a11c5b
MT
31550+ pax_open_kernel();
31551+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31552+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31553+ pax_close_kernel();
31554 }
57199397
MT
31555 vma->vm_ops = &radeon_ttm_vm_ops;
31556 return 0;
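radeon_mmap() used to patch a writable copy of the TTM vm_ops at run time; once the constify plugin makes such ops structures read-only, the one-time initialisation has to be wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift write protection on kernel read-only data. A sketch of the idiom, assuming the PaX helpers and the 3.x-era fault handler signature:

/* Sketch only: pax_open_kernel()/pax_close_kernel() exist only in a
 * PaX-patched tree; on a vanilla kernel this rodata simply cannot be
 * written. */
#include <linux/mm.h>
#include <linux/string.h>

static struct vm_operations_struct my_vm_ops;	/* const under constify */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static void install_fault_handler(const struct vm_operations_struct *base)
{
	pax_open_kernel();
	memcpy((void *)&my_vm_ops, base, sizeof(my_vm_ops));
	*(void **)&my_vm_ops.fault = &my_fault;
	pax_close_kernel();
}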
fe2de317 31557diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
5e856224 31558index f68dff2..8df955c 100644
fe2de317
MT
31559--- a/drivers/gpu/drm/radeon/rs690.c
31560+++ b/drivers/gpu/drm/radeon/rs690.c
31561@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
71d190be
MT
31562 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31563 rdev->pm.sideport_bandwidth.full)
31564 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31565- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31566+ read_delay_latency.full = dfixed_const(800 * 1000);
31567 read_delay_latency.full = dfixed_div(read_delay_latency,
31568 rdev->pm.igp_sideport_mclk);
31569+ a.full = dfixed_const(370);
31570+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31571 } else {
31572 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31573 rdev->pm.k8_bandwidth.full)
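The rs690.c hunk is a fixed-point overflow fix rather than a hardening change: assuming the usual 20.12 dfixed format from drm_fixed.h (dfixed_const(x) is roughly x << 12), 370 * 800 * 1000 = 296,000,000 no longer fits in 32 bits once shifted, so the patch divides first and folds the factor of 370 back in afterwards with dfixed_mul(). A tiny user-space check of the arithmetic:

/* Sketch only: demonstrates why 370*800*1000 cannot be fed to a
 * 20.12 dfixed_const()-style macro (value << 12) on a 32-bit type. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw   = 370ULL * 800 * 1000;	/* 296,000,000 */
	uint64_t fixed = raw << 12;		/* ~1.21e12: > UINT32_MAX */

	printf("raw=%llu fits in u32: %s\n",
	       (unsigned long long)raw, raw <= UINT32_MAX ? "yes" : "no");
	printf("raw<<12=%llu fits in u32: %s\n",
	       (unsigned long long)fixed, fixed <= UINT32_MAX ? "yes" : "no");
	return 0;
}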
fe2de317 31574diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
5e856224 31575index 499debd..66fce72 100644
fe2de317
MT
31576--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31577+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31578@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
15a11c5b
MT
31579 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31580 struct shrink_control *sc)
8308f9c9
MT
31581 {
31582- static atomic_t start_pool = ATOMIC_INIT(0);
31583+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31584 unsigned i;
31585- unsigned pool_offset = atomic_add_return(1, &start_pool);
31586+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31587 struct ttm_page_pool *pool;
15a11c5b 31588 int shrink_pages = sc->nr_to_scan;
8308f9c9 31589
fe2de317 31590diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
5e856224 31591index 88edacc..1e5412b 100644
fe2de317
MT
31592--- a/drivers/gpu/drm/via/via_drv.h
31593+++ b/drivers/gpu/drm/via/via_drv.h
8308f9c9
MT
31594@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31595 typedef uint32_t maskarray_t[5];
31596
31597 typedef struct drm_via_irq {
31598- atomic_t irq_received;
31599+ atomic_unchecked_t irq_received;
31600 uint32_t pending_mask;
31601 uint32_t enable_mask;
31602 wait_queue_head_t irq_queue;
31603@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31604 struct timeval last_vblank;
31605 int last_vblank_valid;
31606 unsigned usec_per_vblank;
31607- atomic_t vbl_received;
31608+ atomic_unchecked_t vbl_received;
31609 drm_via_state_t hc_state;
31610 char pci_buf[VIA_PCI_BUF_SIZE];
31611 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
fe2de317
MT
31612diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31613index d391f48..10c8ca3 100644
31614--- a/drivers/gpu/drm/via/via_irq.c
31615+++ b/drivers/gpu/drm/via/via_irq.c
31616@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
31617 if (crtc != 0)
31618 return 0;
31619
31620- return atomic_read(&dev_priv->vbl_received);
31621+ return atomic_read_unchecked(&dev_priv->vbl_received);
31622 }
31623
31624 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 31625@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31626
31627 status = VIA_READ(VIA_REG_INTERRUPT);
31628 if (status & VIA_IRQ_VBLANK_PENDING) {
31629- atomic_inc(&dev_priv->vbl_received);
31630- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31631+ atomic_inc_unchecked(&dev_priv->vbl_received);
31632+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31633 do_gettimeofday(&cur_vblank);
31634 if (dev_priv->last_vblank_valid) {
31635 dev_priv->usec_per_vblank =
fe2de317 31636@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31637 dev_priv->last_vblank = cur_vblank;
31638 dev_priv->last_vblank_valid = 1;
31639 }
31640- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31641+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31642 DRM_DEBUG("US per vblank is: %u\n",
31643 dev_priv->usec_per_vblank);
31644 }
fe2de317 31645@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31646
31647 for (i = 0; i < dev_priv->num_irqs; ++i) {
31648 if (status & cur_irq->pending_mask) {
31649- atomic_inc(&cur_irq->irq_received);
31650+ atomic_inc_unchecked(&cur_irq->irq_received);
31651 DRM_WAKEUP(&cur_irq->irq_queue);
31652 handled = 1;
31653 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
fe2de317 31654@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
8308f9c9
MT
31655 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31656 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31657 masks[irq][4]));
31658- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31659+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31660 } else {
31661 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31662 (((cur_irq_sequence =
31663- atomic_read(&cur_irq->irq_received)) -
31664+ atomic_read_unchecked(&cur_irq->irq_received)) -
31665 *sequence) <= (1 << 23)));
31666 }
31667 *sequence = cur_irq_sequence;
fe2de317 31668@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
8308f9c9
MT
31669 }
31670
31671 for (i = 0; i < dev_priv->num_irqs; ++i) {
31672- atomic_set(&cur_irq->irq_received, 0);
31673+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31674 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31675 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31676 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
fe2de317 31677@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
8308f9c9
MT
31678 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31679 case VIA_IRQ_RELATIVE:
31680 irqwait->request.sequence +=
31681- atomic_read(&cur_irq->irq_received);
31682+ atomic_read_unchecked(&cur_irq->irq_received);
31683 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31684 case VIA_IRQ_ABSOLUTE:
31685 break;
fe2de317 31686diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
4c928ab7 31687index dc27970..f18b008 100644
fe2de317
MT
31688--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31689+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
4c928ab7 31690@@ -260,7 +260,7 @@ struct vmw_private {
8308f9c9
MT
31691 * Fencing and IRQs.
31692 */
31693
4c928ab7
MT
31694- atomic_t marker_seq;
31695+ atomic_unchecked_t marker_seq;
8308f9c9
MT
31696 wait_queue_head_t fence_queue;
31697 wait_queue_head_t fifo_queue;
4c928ab7 31698 int fence_queue_waiters; /* Protected by hw_mutex */
fe2de317 31699diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
4c928ab7 31700index a0c2f12..68ae6cb 100644
fe2de317
MT
31701--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31702+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31703@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
8308f9c9
MT
31704 (unsigned int) min,
31705 (unsigned int) fifo->capabilities);
31706
4c928ab7
MT
31707- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31708+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31709 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31710 vmw_marker_queue_init(&fifo->marker_queue);
8308f9c9 31711 return vmw_fifo_send_fence(dev_priv, &dummy);
4c928ab7 31712@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
6e9df6a3
MT
31713 if (reserveable)
31714 iowrite32(bytes, fifo_mem +
31715 SVGA_FIFO_RESERVED);
31716- return fifo_mem + (next_cmd >> 2);
31717+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31718 } else {
31719 need_bounce = true;
31720 }
4c928ab7 31721@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
8308f9c9
MT
31722
31723 fm = vmw_fifo_reserve(dev_priv, bytes);
31724 if (unlikely(fm == NULL)) {
4c928ab7
MT
31725- *seqno = atomic_read(&dev_priv->marker_seq);
31726+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
8308f9c9 31727 ret = -ENOMEM;
4c928ab7 31728 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
8308f9c9 31729 false, 3*HZ);
4c928ab7 31730@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
8308f9c9
MT
31731 }
31732
31733 do {
4c928ab7
MT
31734- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31735+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31736 } while (*seqno == 0);
8308f9c9
MT
31737
31738 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
fe2de317 31739diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
4c928ab7 31740index cabc95f..14b3d77 100644
fe2de317
MT
31741--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31742+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
4c928ab7 31743@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
8308f9c9
MT
31744 * emitted. Then the fence is stale and signaled.
31745 */
31746
4c928ab7
MT
31747- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31748+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
8308f9c9
MT
31749 > VMW_FENCE_WRAP);
31750
31751 return ret;
4c928ab7 31752@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
8308f9c9
MT
31753
31754 if (fifo_idle)
31755 down_read(&fifo_state->rwsem);
4c928ab7
MT
31756- signal_seq = atomic_read(&dev_priv->marker_seq);
31757+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
8308f9c9
MT
31758 ret = 0;
31759
31760 for (;;) {
4c928ab7
MT
31761diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31762index 8a8725c..afed796 100644
31763--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31764+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31765@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31766 while (!vmw_lag_lt(queue, us)) {
31767 spin_lock(&queue->lock);
31768 if (list_empty(&queue->head))
31769- seqno = atomic_read(&dev_priv->marker_seq);
31770+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31771 else {
31772 marker = list_first_entry(&queue->head,
31773 struct vmw_marker, head);
fe2de317 31774diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5e856224 31775index 75dbe34..f9204a8 100644
fe2de317
MT
31776--- a/drivers/hid/hid-core.c
31777+++ b/drivers/hid/hid-core.c
5e856224 31778@@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
8308f9c9
MT
31779
31780 int hid_add_device(struct hid_device *hdev)
31781 {
31782- static atomic_t id = ATOMIC_INIT(0);
31783+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31784 int ret;
31785
31786 if (WARN_ON(hdev->status & HID_STAT_ADDED))
5e856224 31787@@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
8308f9c9
MT
31788 /* XXX hack, any other cleaner solution after the driver core
31789 * is converted to allow more than 20 bytes as the device name? */
31790 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31791- hdev->vendor, hdev->product, atomic_inc_return(&id));
31792+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31793
31794 hid_debug_register(hdev, dev_name(&hdev->dev));
31795 ret = device_add(&hdev->dev);
fe2de317 31796diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
5e856224 31797index b1ec0e2..c295a61 100644
fe2de317
MT
31798--- a/drivers/hid/usbhid/hiddev.c
31799+++ b/drivers/hid/usbhid/hiddev.c
31800@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
16454cff 31801 break;
ae4e228f 31802
df50ba0c
MT
31803 case HIDIOCAPPLICATION:
31804- if (arg < 0 || arg >= hid->maxapplication)
31805+ if (arg >= hid->maxapplication)
16454cff 31806 break;
df50ba0c
MT
31807
31808 for (i = 0; i < hid->maxcollection; i++)
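In hiddev_ioctl() the argument arg is an unsigned long, so the "arg < 0" half of the old test is always false (and draws compiler warnings); the patch drops the dead comparison and keeps only the upper-bound check, which is sufficient on its own. A minimal illustration with hypothetical names standing in for arg and hid->maxapplication:

/* Sketch only: 'arg' mirrors the unsigned long ioctl argument,
 * 'maxapplication' stands in for hid->maxapplication. */
#include <stdio.h>

static int check(unsigned long arg, unsigned int maxapplication)
{
	/* 'arg < 0' is always false for an unsigned type, so only the
	 * upper bound needs testing. */
	return arg >= maxapplication ? -1 : 0;
}

int main(void)
{
	printf("%d %d\n", check(3, 8), check(9, 8));	/* prints: 0 -1 */
	return 0;
}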
4c928ab7
MT
31809diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31810index 4065374..10ed7dc 100644
31811--- a/drivers/hv/channel.c
31812+++ b/drivers/hv/channel.c
31813@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31814 int ret = 0;
31815 int t;
31816
31817- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31818- atomic_inc(&vmbus_connection.next_gpadl_handle);
31819+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31820+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31821
31822 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31823 if (ret)
31824diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
5e856224 31825index 12aa97f..c0679f7 100644
4c928ab7
MT
31826--- a/drivers/hv/hv.c
31827+++ b/drivers/hv/hv.c
31828@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31829 u64 output_address = (output) ? virt_to_phys(output) : 0;
31830 u32 output_address_hi = output_address >> 32;
31831 u32 output_address_lo = output_address & 0xFFFFFFFF;
31832- void *hypercall_page = hv_context.hypercall_page;
31833+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31834
31835 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31836 "=a"(hv_status_lo) : "d" (control_hi),
31837diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
5e856224 31838index 6d7d286..92b0873 100644
4c928ab7
MT
31839--- a/drivers/hv/hyperv_vmbus.h
31840+++ b/drivers/hv/hyperv_vmbus.h
31841@@ -556,7 +556,7 @@ enum vmbus_connect_state {
31842 struct vmbus_connection {
31843 enum vmbus_connect_state conn_state;
31844
31845- atomic_t next_gpadl_handle;
31846+ atomic_unchecked_t next_gpadl_handle;
31847
31848 /*
31849 * Represents channel interrupts. Each bit position represents a
31850diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
5e856224 31851index a220e57..428f54d 100644
4c928ab7
MT
31852--- a/drivers/hv/vmbus_drv.c
31853+++ b/drivers/hv/vmbus_drv.c
31854@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31855 {
31856 int ret = 0;
31857
31858- static atomic_t device_num = ATOMIC_INIT(0);
31859+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31860
31861 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31862- atomic_inc_return(&device_num));
31863+ atomic_inc_return_unchecked(&device_num));
31864
31865 child_device_obj->device.bus = &hv_bus;
31866 child_device_obj->device.parent = &hv_acpi_dev->dev;
fe2de317 31867diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
5e856224 31868index 554f046..f8b4729 100644
fe2de317
MT
31869--- a/drivers/hwmon/acpi_power_meter.c
31870+++ b/drivers/hwmon/acpi_power_meter.c
31871@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
15a11c5b 31872 return res;
8308f9c9 31873
15a11c5b
MT
31874 temp /= 1000;
31875- if (temp < 0)
31876- return -EINVAL;
8308f9c9 31877
15a11c5b
MT
31878 mutex_lock(&resource->lock);
31879 resource->trip[attr->index - 7] = temp;
fe2de317 31880diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
5e856224 31881index 91fdd1f..b66a686 100644
fe2de317
MT
31882--- a/drivers/hwmon/sht15.c
31883+++ b/drivers/hwmon/sht15.c
15a11c5b 31884@@ -166,7 +166,7 @@ struct sht15_data {
8308f9c9 31885 int supply_uV;
15a11c5b 31886 bool supply_uV_valid;
8308f9c9
MT
31887 struct work_struct update_supply_work;
31888- atomic_t interrupt_handled;
31889+ atomic_unchecked_t interrupt_handled;
31890 };
31891
31892 /**
fe2de317 31893@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
8308f9c9
MT
31894 return ret;
31895
31896 gpio_direction_input(data->pdata->gpio_data);
31897- atomic_set(&data->interrupt_handled, 0);
31898+ atomic_set_unchecked(&data->interrupt_handled, 0);
31899
31900 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31901 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31902 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
66a7e928 31903 /* Only relevant if the interrupt hasn't occurred. */
8308f9c9
MT
31904- if (!atomic_read(&data->interrupt_handled))
31905+ if (!atomic_read_unchecked(&data->interrupt_handled))
31906 schedule_work(&data->read_work);
31907 }
31908 ret = wait_event_timeout(data->wait_queue,
fe2de317 31909@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
15a11c5b 31910
8308f9c9
MT
31911 /* First disable the interrupt */
31912 disable_irq_nosync(irq);
31913- atomic_inc(&data->interrupt_handled);
31914+ atomic_inc_unchecked(&data->interrupt_handled);
31915 /* Then schedule a reading work struct */
15a11c5b 31916 if (data->state != SHT15_READING_NOTHING)
8308f9c9 31917 schedule_work(&data->read_work);
fe2de317 31918@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
15a11c5b
MT
31919 * If not, then start the interrupt again - care here as could
31920 * have gone low in meantime so verify it hasn't!
31921 */
8308f9c9
MT
31922- atomic_set(&data->interrupt_handled, 0);
31923+ atomic_set_unchecked(&data->interrupt_handled, 0);
31924 enable_irq(gpio_to_irq(data->pdata->gpio_data));
66a7e928 31925 /* If still not occurred or another handler has been scheduled */
8308f9c9
MT
31926 if (gpio_get_value(data->pdata->gpio_data)
31927- || atomic_read(&data->interrupt_handled))
31928+ || atomic_read_unchecked(&data->interrupt_handled))
31929 return;
31930 }
15a11c5b 31931
fe2de317
MT
31932diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31933index 378fcb5..5e91fa8 100644
31934--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31935+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
15a11c5b
MT
31936@@ -43,7 +43,7 @@
31937 extern struct i2c_adapter amd756_smbus;
31938
31939 static struct i2c_adapter *s4882_adapter;
31940-static struct i2c_algorithm *s4882_algo;
31941+static i2c_algorithm_no_const *s4882_algo;
31942
31943 /* Wrapper access functions for multiplexed SMBus */
31944 static DEFINE_MUTEX(amd756_lock);
fe2de317
MT
31945diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31946index 29015eb..af2d8e9 100644
31947--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31948+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
15a11c5b
MT
31949@@ -41,7 +41,7 @@
31950 extern struct i2c_adapter *nforce2_smbus;
66a7e928 31951
15a11c5b
MT
31952 static struct i2c_adapter *s4985_adapter;
31953-static struct i2c_algorithm *s4985_algo;
31954+static i2c_algorithm_no_const *s4985_algo;
66a7e928 31955
15a11c5b
MT
31956 /* Wrapper access functions for multiplexed SMBus */
31957 static DEFINE_MUTEX(nforce2_lock);
fe2de317
MT
31958diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31959index d7a4833..7fae376 100644
31960--- a/drivers/i2c/i2c-mux.c
31961+++ b/drivers/i2c/i2c-mux.c
15a11c5b
MT
31962@@ -28,7 +28,7 @@
31963 /* multiplexer per channel data */
31964 struct i2c_mux_priv {
31965 struct i2c_adapter adap;
31966- struct i2c_algorithm algo;
31967+ i2c_algorithm_no_const algo;
31968
31969 struct i2c_adapter *parent;
31970 void *mux_dev; /* the mux chip/device */
fe2de317
MT
31971diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31972index 57d00ca..0145194 100644
31973--- a/drivers/ide/aec62xx.c
31974+++ b/drivers/ide/aec62xx.c
31975@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
6e9df6a3
MT
31976 .cable_detect = atp86x_cable_detect,
31977 };
31978
31979-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31980+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31981 { /* 0: AEC6210 */
31982 .name = DRV_NAME,
31983 .init_chipset = init_chipset_aec62xx,
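The long run of IDE host-driver hunks that follows is one mechanical change: const-qualified ide_port_info tables are moved from __devinitdata to __devinitconst, so this initialisation data lands in the read-only .devinit.rodata section instead of a writable one. A sketch of the resulting declaration, assuming the stock __devinitconst marker from <linux/init.h> of the 3.x era (both markers were removed from later kernels together with __devinit):

/* Sketch only: __devinitconst comes from <linux/init.h> in 3.x kernels;
 * previously these const tables were tagged __devinitdata, i.e. placed
 * in a writable init section. */
#include <linux/init.h>
#include <linux/ide.h>

static const struct ide_port_info example_chipset __devinitconst = {
	.name = "example",		/* hypothetical entry, not a real driver */
};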
fe2de317
MT
31984diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31985index 2c8016a..911a27c 100644
31986--- a/drivers/ide/alim15x3.c
31987+++ b/drivers/ide/alim15x3.c
31988@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
6e9df6a3
MT
31989 .dma_sff_read_status = ide_dma_sff_read_status,
31990 };
31991
31992-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31993+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31994 .name = DRV_NAME,
31995 .init_chipset = init_chipset_ali15x3,
31996 .init_hwif = init_hwif_ali15x3,
fe2de317
MT
31997diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31998index 3747b25..56fc995 100644
31999--- a/drivers/ide/amd74xx.c
32000+++ b/drivers/ide/amd74xx.c
32001@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
6e9df6a3
MT
32002 .udma_mask = udma, \
32003 }
32004
32005-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
32006+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
32007 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
32008 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
32009 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
fe2de317
MT
32010diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
32011index 15f0ead..cb43480 100644
32012--- a/drivers/ide/atiixp.c
32013+++ b/drivers/ide/atiixp.c
32014@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
6e9df6a3
MT
32015 .cable_detect = atiixp_cable_detect,
32016 };
32017
32018-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
32019+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
32020 { /* 0: IXP200/300/400/700 */
32021 .name = DRV_NAME,
32022 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
fe2de317
MT
32023diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
32024index 5f80312..d1fc438 100644
32025--- a/drivers/ide/cmd64x.c
32026+++ b/drivers/ide/cmd64x.c
32027@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
6e9df6a3
MT
32028 .dma_sff_read_status = ide_dma_sff_read_status,
32029 };
32030
32031-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
32032+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
32033 { /* 0: CMD643 */
32034 .name = DRV_NAME,
32035 .init_chipset = init_chipset_cmd64x,
fe2de317
MT
32036diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
32037index 2c1e5f7..1444762 100644
32038--- a/drivers/ide/cs5520.c
32039+++ b/drivers/ide/cs5520.c
32040@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
6e9df6a3
MT
32041 .set_dma_mode = cs5520_set_dma_mode,
32042 };
32043
32044-static const struct ide_port_info cyrix_chipset __devinitdata = {
32045+static const struct ide_port_info cyrix_chipset __devinitconst = {
32046 .name = DRV_NAME,
32047 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
32048 .port_ops = &cs5520_port_ops,
fe2de317
MT
32049diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
32050index 4dc4eb9..49b40ad 100644
32051--- a/drivers/ide/cs5530.c
32052+++ b/drivers/ide/cs5530.c
32053@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
6e9df6a3
MT
32054 .udma_filter = cs5530_udma_filter,
32055 };
32056
32057-static const struct ide_port_info cs5530_chipset __devinitdata = {
32058+static const struct ide_port_info cs5530_chipset __devinitconst = {
32059 .name = DRV_NAME,
32060 .init_chipset = init_chipset_cs5530,
32061 .init_hwif = init_hwif_cs5530,
fe2de317
MT
32062diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
32063index 5059faf..18d4c85 100644
32064--- a/drivers/ide/cs5535.c
32065+++ b/drivers/ide/cs5535.c
32066@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
6e9df6a3
MT
32067 .cable_detect = cs5535_cable_detect,
32068 };
32069
32070-static const struct ide_port_info cs5535_chipset __devinitdata = {
32071+static const struct ide_port_info cs5535_chipset __devinitconst = {
32072 .name = DRV_NAME,
32073 .port_ops = &cs5535_port_ops,
32074 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
fe2de317 32075diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
4c928ab7 32076index 847553f..3ffb49d 100644
fe2de317
MT
32077--- a/drivers/ide/cy82c693.c
32078+++ b/drivers/ide/cy82c693.c
32079@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
6e9df6a3
MT
32080 .set_dma_mode = cy82c693_set_dma_mode,
32081 };
32082
32083-static const struct ide_port_info cy82c693_chipset __devinitdata = {
32084+static const struct ide_port_info cy82c693_chipset __devinitconst = {
32085 .name = DRV_NAME,
32086 .init_iops = init_iops_cy82c693,
32087 .port_ops = &cy82c693_port_ops,
fe2de317
MT
32088diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
32089index 58c51cd..4aec3b8 100644
32090--- a/drivers/ide/hpt366.c
32091+++ b/drivers/ide/hpt366.c
32092@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
6e9df6a3
MT
32093 }
32094 };
32095
32096-static const struct hpt_info hpt36x __devinitdata = {
32097+static const struct hpt_info hpt36x __devinitconst = {
32098 .chip_name = "HPT36x",
32099 .chip_type = HPT36x,
32100 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
fe2de317 32101@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
6e9df6a3
MT
32102 .timings = &hpt36x_timings
32103 };
32104
32105-static const struct hpt_info hpt370 __devinitdata = {
32106+static const struct hpt_info hpt370 __devinitconst = {
32107 .chip_name = "HPT370",
32108 .chip_type = HPT370,
32109 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 32110@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
6e9df6a3
MT
32111 .timings = &hpt37x_timings
32112 };
32113
32114-static const struct hpt_info hpt370a __devinitdata = {
32115+static const struct hpt_info hpt370a __devinitconst = {
32116 .chip_name = "HPT370A",
32117 .chip_type = HPT370A,
32118 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 32119@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
6e9df6a3
MT
32120 .timings = &hpt37x_timings
32121 };
32122
32123-static const struct hpt_info hpt374 __devinitdata = {
32124+static const struct hpt_info hpt374 __devinitconst = {
32125 .chip_name = "HPT374",
32126 .chip_type = HPT374,
32127 .udma_mask = ATA_UDMA5,
fe2de317 32128@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
6e9df6a3
MT
32129 .timings = &hpt37x_timings
32130 };
32131
32132-static const struct hpt_info hpt372 __devinitdata = {
32133+static const struct hpt_info hpt372 __devinitconst = {
32134 .chip_name = "HPT372",
32135 .chip_type = HPT372,
32136 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32137@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
6e9df6a3
MT
32138 .timings = &hpt37x_timings
32139 };
32140
32141-static const struct hpt_info hpt372a __devinitdata = {
32142+static const struct hpt_info hpt372a __devinitconst = {
32143 .chip_name = "HPT372A",
32144 .chip_type = HPT372A,
32145 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32146@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
6e9df6a3
MT
32147 .timings = &hpt37x_timings
32148 };
32149
32150-static const struct hpt_info hpt302 __devinitdata = {
32151+static const struct hpt_info hpt302 __devinitconst = {
32152 .chip_name = "HPT302",
32153 .chip_type = HPT302,
32154 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32155@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
6e9df6a3
MT
32156 .timings = &hpt37x_timings
32157 };
32158
32159-static const struct hpt_info hpt371 __devinitdata = {
32160+static const struct hpt_info hpt371 __devinitconst = {
32161 .chip_name = "HPT371",
32162 .chip_type = HPT371,
32163 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32164@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
6e9df6a3
MT
32165 .timings = &hpt37x_timings
32166 };
32167
32168-static const struct hpt_info hpt372n __devinitdata = {
32169+static const struct hpt_info hpt372n __devinitconst = {
32170 .chip_name = "HPT372N",
32171 .chip_type = HPT372N,
32172 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32173@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
6e9df6a3
MT
32174 .timings = &hpt37x_timings
32175 };
32176
32177-static const struct hpt_info hpt302n __devinitdata = {
32178+static const struct hpt_info hpt302n __devinitconst = {
32179 .chip_name = "HPT302N",
32180 .chip_type = HPT302N,
32181 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32182@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
6e9df6a3
MT
32183 .timings = &hpt37x_timings
32184 };
32185
32186-static const struct hpt_info hpt371n __devinitdata = {
32187+static const struct hpt_info hpt371n __devinitconst = {
32188 .chip_name = "HPT371N",
32189 .chip_type = HPT371N,
32190 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 32191@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
6e9df6a3
MT
32192 .dma_sff_read_status = ide_dma_sff_read_status,
32193 };
32194
32195-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
32196+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
32197 { /* 0: HPT36x */
32198 .name = DRV_NAME,
32199 .init_chipset = init_chipset_hpt366,
fe2de317 32200diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
4c928ab7 32201index 8126824..55a2798 100644
fe2de317
MT
32202--- a/drivers/ide/ide-cd.c
32203+++ b/drivers/ide/ide-cd.c
4c928ab7 32204@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ae4e228f
MT
32205 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
32206 if ((unsigned long)buf & alignment
32207 || blk_rq_bytes(rq) & q->dma_pad_mask
32208- || object_is_on_stack(buf))
32209+ || object_starts_on_stack(buf))
32210 drive->dma = 0;
32211 }
32212 }
fe2de317 32213diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
5e856224 32214index 7f56b73..dab5b67 100644
fe2de317
MT
32215--- a/drivers/ide/ide-pci-generic.c
32216+++ b/drivers/ide/ide-pci-generic.c
32217@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
6e9df6a3
MT
32218 .udma_mask = ATA_UDMA6, \
32219 }
32220
32221-static const struct ide_port_info generic_chipsets[] __devinitdata = {
32222+static const struct ide_port_info generic_chipsets[] __devinitconst = {
32223 /* 0: Unknown */
32224 DECLARE_GENERIC_PCI_DEV(0),
32225
fe2de317
MT
32226diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
32227index 560e66d..d5dd180 100644
32228--- a/drivers/ide/it8172.c
32229+++ b/drivers/ide/it8172.c
32230@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
6e9df6a3
MT
32231 .set_dma_mode = it8172_set_dma_mode,
32232 };
32233
32234-static const struct ide_port_info it8172_port_info __devinitdata = {
32235+static const struct ide_port_info it8172_port_info __devinitconst = {
32236 .name = DRV_NAME,
32237 .port_ops = &it8172_port_ops,
32238 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
fe2de317
MT
32239diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
32240index 46816ba..1847aeb 100644
32241--- a/drivers/ide/it8213.c
32242+++ b/drivers/ide/it8213.c
32243@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
6e9df6a3
MT
32244 .cable_detect = it8213_cable_detect,
32245 };
32246
32247-static const struct ide_port_info it8213_chipset __devinitdata = {
32248+static const struct ide_port_info it8213_chipset __devinitconst = {
32249 .name = DRV_NAME,
32250 .enablebits = { {0x41, 0x80, 0x80} },
32251 .port_ops = &it8213_port_ops,
fe2de317
MT
32252diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
32253index 2e3169f..c5611db 100644
32254--- a/drivers/ide/it821x.c
32255+++ b/drivers/ide/it821x.c
32256@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
6e9df6a3
MT
32257 .cable_detect = it821x_cable_detect,
32258 };
32259
32260-static const struct ide_port_info it821x_chipset __devinitdata = {
32261+static const struct ide_port_info it821x_chipset __devinitconst = {
32262 .name = DRV_NAME,
32263 .init_chipset = init_chipset_it821x,
32264 .init_hwif = init_hwif_it821x,
fe2de317
MT
32265diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
32266index 74c2c4a..efddd7d 100644
32267--- a/drivers/ide/jmicron.c
32268+++ b/drivers/ide/jmicron.c
32269@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
6e9df6a3
MT
32270 .cable_detect = jmicron_cable_detect,
32271 };
32272
32273-static const struct ide_port_info jmicron_chipset __devinitdata = {
32274+static const struct ide_port_info jmicron_chipset __devinitconst = {
32275 .name = DRV_NAME,
32276 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
32277 .port_ops = &jmicron_port_ops,
fe2de317
MT
32278diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
32279index 95327a2..73f78d8 100644
32280--- a/drivers/ide/ns87415.c
32281+++ b/drivers/ide/ns87415.c
32282@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
6e9df6a3
MT
32283 .dma_sff_read_status = superio_dma_sff_read_status,
32284 };
32285
32286-static const struct ide_port_info ns87415_chipset __devinitdata = {
32287+static const struct ide_port_info ns87415_chipset __devinitconst = {
32288 .name = DRV_NAME,
32289 .init_hwif = init_hwif_ns87415,
32290 .tp_ops = &ns87415_tp_ops,
fe2de317
MT
32291diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
32292index 1a53a4c..39edc66 100644
32293--- a/drivers/ide/opti621.c
32294+++ b/drivers/ide/opti621.c
32295@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
6e9df6a3
MT
32296 .set_pio_mode = opti621_set_pio_mode,
32297 };
32298
32299-static const struct ide_port_info opti621_chipset __devinitdata = {
32300+static const struct ide_port_info opti621_chipset __devinitconst = {
32301 .name = DRV_NAME,
32302 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
32303 .port_ops = &opti621_port_ops,
fe2de317
MT
32304diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
32305index 9546fe2..2e5ceb6 100644
32306--- a/drivers/ide/pdc202xx_new.c
32307+++ b/drivers/ide/pdc202xx_new.c
32308@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
6e9df6a3
MT
32309 .udma_mask = udma, \
32310 }
32311
32312-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
32313+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
32314 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
32315 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
32316 };
fe2de317
MT
32317diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
32318index 3a35ec6..5634510 100644
32319--- a/drivers/ide/pdc202xx_old.c
32320+++ b/drivers/ide/pdc202xx_old.c
32321@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
6e9df6a3
MT
32322 .max_sectors = sectors, \
32323 }
32324
32325-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
32326+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
32327 { /* 0: PDC20246 */
32328 .name = DRV_NAME,
32329 .init_chipset = init_chipset_pdc202xx,
fe2de317 32330diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
4c928ab7 32331index 1892e81..fe0fd60 100644
fe2de317
MT
32332--- a/drivers/ide/piix.c
32333+++ b/drivers/ide/piix.c
32334@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
6e9df6a3
MT
32335 .udma_mask = udma, \
32336 }
32337
32338-static const struct ide_port_info piix_pci_info[] __devinitdata = {
32339+static const struct ide_port_info piix_pci_info[] __devinitconst = {
32340 /* 0: MPIIX */
32341 { /*
32342 * MPIIX actually has only a single IDE channel mapped to
fe2de317
MT
32343diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
32344index a6414a8..c04173e 100644
32345--- a/drivers/ide/rz1000.c
32346+++ b/drivers/ide/rz1000.c
32347@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
6e9df6a3
MT
32348 }
32349 }
32350
32351-static const struct ide_port_info rz1000_chipset __devinitdata = {
32352+static const struct ide_port_info rz1000_chipset __devinitconst = {
32353 .name = DRV_NAME,
32354 .host_flags = IDE_HFLAG_NO_DMA,
32355 };
fe2de317
MT
32356diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
32357index 356b9b5..d4758eb 100644
32358--- a/drivers/ide/sc1200.c
32359+++ b/drivers/ide/sc1200.c
32360@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
6e9df6a3
MT
32361 .dma_sff_read_status = ide_dma_sff_read_status,
32362 };
32363
32364-static const struct ide_port_info sc1200_chipset __devinitdata = {
32365+static const struct ide_port_info sc1200_chipset __devinitconst = {
32366 .name = DRV_NAME,
32367 .port_ops = &sc1200_port_ops,
32368 .dma_ops = &sc1200_dma_ops,
fe2de317
MT
32369diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
32370index b7f5b0c..9701038 100644
32371--- a/drivers/ide/scc_pata.c
32372+++ b/drivers/ide/scc_pata.c
32373@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
6e9df6a3
MT
32374 .dma_sff_read_status = scc_dma_sff_read_status,
32375 };
32376
32377-static const struct ide_port_info scc_chipset __devinitdata = {
32378+static const struct ide_port_info scc_chipset __devinitconst = {
32379 .name = "sccIDE",
32380 .init_iops = init_iops_scc,
32381 .init_dma = scc_init_dma,
fe2de317
MT
32382diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32383index 35fb8da..24d72ef 100644
32384--- a/drivers/ide/serverworks.c
32385+++ b/drivers/ide/serverworks.c
32386@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
6e9df6a3
MT
32387 .cable_detect = svwks_cable_detect,
32388 };
32389
32390-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32391+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32392 { /* 0: OSB4 */
32393 .name = DRV_NAME,
32394 .init_chipset = init_chipset_svwks,
fe2de317
MT
32395diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32396index ddeda44..46f7e30 100644
32397--- a/drivers/ide/siimage.c
32398+++ b/drivers/ide/siimage.c
32399@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
6e9df6a3
MT
32400 .udma_mask = ATA_UDMA6, \
32401 }
32402
32403-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32404+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32405 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32406 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32407 };
fe2de317
MT
32408diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32409index 4a00225..09e61b4 100644
32410--- a/drivers/ide/sis5513.c
32411+++ b/drivers/ide/sis5513.c
32412@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
6e9df6a3
MT
32413 .cable_detect = sis_cable_detect,
32414 };
32415
32416-static const struct ide_port_info sis5513_chipset __devinitdata = {
32417+static const struct ide_port_info sis5513_chipset __devinitconst = {
32418 .name = DRV_NAME,
32419 .init_chipset = init_chipset_sis5513,
32420 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
fe2de317
MT
32421diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32422index f21dc2a..d051cd2 100644
32423--- a/drivers/ide/sl82c105.c
32424+++ b/drivers/ide/sl82c105.c
32425@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
6e9df6a3
MT
32426 .dma_sff_read_status = ide_dma_sff_read_status,
32427 };
32428
32429-static const struct ide_port_info sl82c105_chipset __devinitdata = {
32430+static const struct ide_port_info sl82c105_chipset __devinitconst = {
32431 .name = DRV_NAME,
32432 .init_chipset = init_chipset_sl82c105,
32433 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
fe2de317
MT
32434diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32435index 864ffe0..863a5e9 100644
32436--- a/drivers/ide/slc90e66.c
32437+++ b/drivers/ide/slc90e66.c
32438@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
6e9df6a3
MT
32439 .cable_detect = slc90e66_cable_detect,
32440 };
32441
32442-static const struct ide_port_info slc90e66_chipset __devinitdata = {
32443+static const struct ide_port_info slc90e66_chipset __devinitconst = {
32444 .name = DRV_NAME,
32445 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32446 .port_ops = &slc90e66_port_ops,
fe2de317 32447diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
4c928ab7 32448index 4799d5c..1794678 100644
fe2de317
MT
32449--- a/drivers/ide/tc86c001.c
32450+++ b/drivers/ide/tc86c001.c
4c928ab7 32451@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
6e9df6a3
MT
32452 .dma_sff_read_status = ide_dma_sff_read_status,
32453 };
32454
32455-static const struct ide_port_info tc86c001_chipset __devinitdata = {
32456+static const struct ide_port_info tc86c001_chipset __devinitconst = {
32457 .name = DRV_NAME,
32458 .init_hwif = init_hwif_tc86c001,
32459 .port_ops = &tc86c001_port_ops,
fe2de317 32460diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
4c928ab7 32461index 281c914..55ce1b8 100644
fe2de317
MT
32462--- a/drivers/ide/triflex.c
32463+++ b/drivers/ide/triflex.c
32464@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
6e9df6a3
MT
32465 .set_dma_mode = triflex_set_mode,
32466 };
32467
32468-static const struct ide_port_info triflex_device __devinitdata = {
32469+static const struct ide_port_info triflex_device __devinitconst = {
32470 .name = DRV_NAME,
32471 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32472 .port_ops = &triflex_port_ops,
fe2de317
MT
32473diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32474index 4b42ca0..e494a98 100644
32475--- a/drivers/ide/trm290.c
32476+++ b/drivers/ide/trm290.c
32477@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
6e9df6a3
MT
32478 .dma_check = trm290_dma_check,
32479 };
32480
32481-static const struct ide_port_info trm290_chipset __devinitdata = {
32482+static const struct ide_port_info trm290_chipset __devinitconst = {
32483 .name = DRV_NAME,
32484 .init_hwif = init_hwif_trm290,
32485 .tp_ops = &trm290_tp_ops,
fe2de317
MT
32486diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32487index f46f49c..eb77678 100644
32488--- a/drivers/ide/via82cxxx.c
32489+++ b/drivers/ide/via82cxxx.c
32490@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
6e9df6a3
MT
32491 .cable_detect = via82cxxx_cable_detect,
32492 };
32493
32494-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32495+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32496 .name = DRV_NAME,
32497 .init_chipset = init_chipset_via82cxxx,
32498 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32499diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32500index 73d4531..c90cd2d 100644
32501--- a/drivers/ieee802154/fakehard.c
32502+++ b/drivers/ieee802154/fakehard.c
32503@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32504 phy->transmit_power = 0xbf;
32505
32506 dev->netdev_ops = &fake_ops;
32507- dev->ml_priv = &fake_mlme;
32508+ dev->ml_priv = (void *)&fake_mlme;
32509
32510 priv = netdev_priv(dev);
32511 priv->phy = phy;
32512diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32513index c889aae..6cf5aa7 100644
32514--- a/drivers/infiniband/core/cm.c
32515+++ b/drivers/infiniband/core/cm.c
32516@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32517
32518 struct cm_counter_group {
32519 struct kobject obj;
32520- atomic_long_t counter[CM_ATTR_COUNT];
32521+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32522 };
32523
32524 struct cm_counter_attribute {
32525@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32526 struct ib_mad_send_buf *msg = NULL;
32527 int ret;
32528
32529- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32530+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32531 counter[CM_REQ_COUNTER]);
32532
32533 /* Quick state check to discard duplicate REQs. */
32534@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32535 if (!cm_id_priv)
32536 return;
32537
32538- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32539+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32540 counter[CM_REP_COUNTER]);
32541 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32542 if (ret)
32543@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32544 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32545 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32546 spin_unlock_irq(&cm_id_priv->lock);
32547- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32548+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32549 counter[CM_RTU_COUNTER]);
32550 goto out;
32551 }
32552@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32553 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32554 dreq_msg->local_comm_id);
32555 if (!cm_id_priv) {
32556- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32557+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32558 counter[CM_DREQ_COUNTER]);
32559 cm_issue_drep(work->port, work->mad_recv_wc);
32560 return -EINVAL;
32561@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32562 case IB_CM_MRA_REP_RCVD:
32563 break;
32564 case IB_CM_TIMEWAIT:
32565- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32566+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32567 counter[CM_DREQ_COUNTER]);
32568 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32569 goto unlock;
32570@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32571 cm_free_msg(msg);
32572 goto deref;
32573 case IB_CM_DREQ_RCVD:
32574- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32575+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32576 counter[CM_DREQ_COUNTER]);
32577 goto unlock;
32578 default:
32579@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32580 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32581 cm_id_priv->msg, timeout)) {
32582 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32583- atomic_long_inc(&work->port->
32584+ atomic_long_inc_unchecked(&work->port->
32585 counter_group[CM_RECV_DUPLICATES].
32586 counter[CM_MRA_COUNTER]);
32587 goto out;
32588@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32589 break;
32590 case IB_CM_MRA_REQ_RCVD:
32591 case IB_CM_MRA_REP_RCVD:
32592- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32593+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32594 counter[CM_MRA_COUNTER]);
32595 /* fall through */
32596 default:
32597@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32598 case IB_CM_LAP_IDLE:
32599 break;
32600 case IB_CM_MRA_LAP_SENT:
32601- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32602+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32603 counter[CM_LAP_COUNTER]);
32604 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32605 goto unlock;
32606@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32607 cm_free_msg(msg);
32608 goto deref;
32609 case IB_CM_LAP_RCVD:
32610- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32611+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32612 counter[CM_LAP_COUNTER]);
32613 goto unlock;
32614 default:
32615@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32616 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32617 if (cur_cm_id_priv) {
32618 spin_unlock_irq(&cm.lock);
32619- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32620+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32621 counter[CM_SIDR_REQ_COUNTER]);
32622 goto out; /* Duplicate message. */
32623 }
32624@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32625 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32626 msg->retries = 1;
32627
32628- atomic_long_add(1 + msg->retries,
32629+ atomic_long_add_unchecked(1 + msg->retries,
32630 &port->counter_group[CM_XMIT].counter[attr_index]);
32631 if (msg->retries)
32632- atomic_long_add(msg->retries,
32633+ atomic_long_add_unchecked(msg->retries,
32634 &port->counter_group[CM_XMIT_RETRIES].
32635 counter[attr_index]);
32636
32637@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32638 }
32639
32640 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32641- atomic_long_inc(&port->counter_group[CM_RECV].
32642+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32643 counter[attr_id - CM_ATTR_ID_OFFSET]);
32644
32645 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32646@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32647 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32648
32649 return sprintf(buf, "%ld\n",
32650- atomic_long_read(&group->counter[cm_attr->index]));
32651+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32652 }
32653
32654 static const struct sysfs_ops cm_counter_ops = {
32655diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32656index 176c8f9..2627b62 100644
32657--- a/drivers/infiniband/core/fmr_pool.c
32658+++ b/drivers/infiniband/core/fmr_pool.c
32659@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32660
32661 struct task_struct *thread;
32662
32663- atomic_t req_ser;
32664- atomic_t flush_ser;
32665+ atomic_unchecked_t req_ser;
32666+ atomic_unchecked_t flush_ser;
32667
32668 wait_queue_head_t force_wait;
32669 };
32670@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32671 struct ib_fmr_pool *pool = pool_ptr;
32672
32673 do {
32674- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32675+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32676 ib_fmr_batch_release(pool);
32677
32678- atomic_inc(&pool->flush_ser);
32679+ atomic_inc_unchecked(&pool->flush_ser);
32680 wake_up_interruptible(&pool->force_wait);
32681
32682 if (pool->flush_function)
32683@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32684 }
32685
32686 set_current_state(TASK_INTERRUPTIBLE);
32687- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32688+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32689 !kthread_should_stop())
32690 schedule();
32691 __set_current_state(TASK_RUNNING);
32692@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32693 pool->dirty_watermark = params->dirty_watermark;
32694 pool->dirty_len = 0;
32695 spin_lock_init(&pool->pool_lock);
32696- atomic_set(&pool->req_ser, 0);
32697- atomic_set(&pool->flush_ser, 0);
32698+ atomic_set_unchecked(&pool->req_ser, 0);
32699+ atomic_set_unchecked(&pool->flush_ser, 0);
32700 init_waitqueue_head(&pool->force_wait);
32701
32702 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32703@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32704 }
32705 spin_unlock_irq(&pool->pool_lock);
32706
32707- serial = atomic_inc_return(&pool->req_ser);
32708+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32709 wake_up_process(pool->thread);
32710
32711 if (wait_event_interruptible(pool->force_wait,
32712- atomic_read(&pool->flush_ser) - serial >= 0))
32713+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32714 return -EINTR;
32715
32716 return 0;
32717@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32718 } else {
32719 list_add_tail(&fmr->list, &pool->dirty_list);
32720 if (++pool->dirty_len >= pool->dirty_watermark) {
32721- atomic_inc(&pool->req_ser);
32722+ atomic_inc_unchecked(&pool->req_ser);
32723 wake_up_process(pool->thread);
32724 }
32725 }
32726diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32727index 40c8353..946b0e4 100644
32728--- a/drivers/infiniband/hw/cxgb4/mem.c
32729+++ b/drivers/infiniband/hw/cxgb4/mem.c
32730@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32731 int err;
32732 struct fw_ri_tpte tpt;
32733 u32 stag_idx;
32734- static atomic_t key;
32735+ static atomic_unchecked_t key;
32736
32737 if (c4iw_fatal_error(rdev))
32738 return -EIO;
32739@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32740 &rdev->resource.tpt_fifo_lock);
32741 if (!stag_idx)
32742 return -ENOMEM;
32743- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32744+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32745 }
32746 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32747 __func__, stag_state, type, pdid, stag_idx);
32748diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
32749index a4de9d5..5fa20c3 100644
32750--- a/drivers/infiniband/hw/ipath/ipath_fs.c
32751+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
32752@@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = {
32753 };
32754
32755 static ssize_t flash_read(struct file *file, char __user *buf,
32756+ size_t count, loff_t *ppos) __size_overflow(3);
32757+static ssize_t flash_read(struct file *file, char __user *buf,
32758 size_t count, loff_t *ppos)
32759 {
32760 struct ipath_devdata *dd;
32761@@ -177,6 +179,8 @@ bail:
32762 }
32763
32764 static ssize_t flash_write(struct file *file, const char __user *buf,
32765+ size_t count, loff_t *ppos) __size_overflow(3);
32766+static ssize_t flash_write(struct file *file, const char __user *buf,
32767 size_t count, loff_t *ppos)
32768 {
32769 struct ipath_devdata *dd;
32770diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32771index 79b3dbc..96e5fcc 100644
32772--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32773+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32774@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32775 struct ib_atomic_eth *ateth;
32776 struct ipath_ack_entry *e;
32777 u64 vaddr;
32778- atomic64_t *maddr;
32779+ atomic64_unchecked_t *maddr;
32780 u64 sdata;
32781 u32 rkey;
32782 u8 next;
32783@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32784 IB_ACCESS_REMOTE_ATOMIC)))
32785 goto nack_acc_unlck;
32786 /* Perform atomic OP and save result. */
32787- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32788+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32789 sdata = be64_to_cpu(ateth->swap_data);
32790 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32791 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32792- (u64) atomic64_add_return(sdata, maddr) - sdata :
32793+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32794 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32795 be64_to_cpu(ateth->compare_data),
32796 sdata);
32797diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32798index 1f95bba..9530f87 100644
32799--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32800+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32801@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32802 unsigned long flags;
32803 struct ib_wc wc;
32804 u64 sdata;
32805- atomic64_t *maddr;
32806+ atomic64_unchecked_t *maddr;
32807 enum ib_wc_status send_status;
32808
32809 /*
32810@@ -382,11 +382,11 @@ again:
32811 IB_ACCESS_REMOTE_ATOMIC)))
32812 goto acc_err;
32813 /* Perform atomic OP and save result. */
32814- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32815+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32816 sdata = wqe->wr.wr.atomic.compare_add;
32817 *(u64 *) sqp->s_sge.sge.vaddr =
32818 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32819- (u64) atomic64_add_return(sdata, maddr) - sdata :
32820+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32821 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32822 sdata, wqe->wr.wr.atomic.swap);
32823 goto send_comp;
32824diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32825index 7140199..da60063 100644
32826--- a/drivers/infiniband/hw/nes/nes.c
32827+++ b/drivers/infiniband/hw/nes/nes.c
32828@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32829 LIST_HEAD(nes_adapter_list);
32830 static LIST_HEAD(nes_dev_list);
32831
32832-atomic_t qps_destroyed;
32833+atomic_unchecked_t qps_destroyed;
32834
32835 static unsigned int ee_flsh_adapter;
32836 static unsigned int sysfs_nonidx_addr;
32837@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32838 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32839 struct nes_adapter *nesadapter = nesdev->nesadapter;
32840
32841- atomic_inc(&qps_destroyed);
32842+ atomic_inc_unchecked(&qps_destroyed);
32843
32844 /* Free the control structures */
32845
32846diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32847index c438e46..ca30356 100644
32848--- a/drivers/infiniband/hw/nes/nes.h
32849+++ b/drivers/infiniband/hw/nes/nes.h
32850@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32851 extern unsigned int wqm_quanta;
32852 extern struct list_head nes_adapter_list;
32853
32854-extern atomic_t cm_connects;
32855-extern atomic_t cm_accepts;
32856-extern atomic_t cm_disconnects;
32857-extern atomic_t cm_closes;
32858-extern atomic_t cm_connecteds;
32859-extern atomic_t cm_connect_reqs;
32860-extern atomic_t cm_rejects;
32861-extern atomic_t mod_qp_timouts;
32862-extern atomic_t qps_created;
32863-extern atomic_t qps_destroyed;
32864-extern atomic_t sw_qps_destroyed;
32865+extern atomic_unchecked_t cm_connects;
32866+extern atomic_unchecked_t cm_accepts;
32867+extern atomic_unchecked_t cm_disconnects;
32868+extern atomic_unchecked_t cm_closes;
32869+extern atomic_unchecked_t cm_connecteds;
32870+extern atomic_unchecked_t cm_connect_reqs;
32871+extern atomic_unchecked_t cm_rejects;
32872+extern atomic_unchecked_t mod_qp_timouts;
32873+extern atomic_unchecked_t qps_created;
32874+extern atomic_unchecked_t qps_destroyed;
32875+extern atomic_unchecked_t sw_qps_destroyed;
32876 extern u32 mh_detected;
32877 extern u32 mh_pauses_sent;
32878 extern u32 cm_packets_sent;
32879@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32880 extern u32 cm_packets_received;
32881 extern u32 cm_packets_dropped;
32882 extern u32 cm_packets_retrans;
32883-extern atomic_t cm_listens_created;
32884-extern atomic_t cm_listens_destroyed;
32885+extern atomic_unchecked_t cm_listens_created;
32886+extern atomic_unchecked_t cm_listens_destroyed;
32887 extern u32 cm_backlog_drops;
32888-extern atomic_t cm_loopbacks;
32889-extern atomic_t cm_nodes_created;
32890-extern atomic_t cm_nodes_destroyed;
32891-extern atomic_t cm_accel_dropped_pkts;
32892-extern atomic_t cm_resets_recvd;
4c928ab7
MT
32893-extern atomic_t pau_qps_created;
32894-extern atomic_t pau_qps_destroyed;
fe2de317
MT
32895+extern atomic_unchecked_t cm_loopbacks;
32896+extern atomic_unchecked_t cm_nodes_created;
32897+extern atomic_unchecked_t cm_nodes_destroyed;
32898+extern atomic_unchecked_t cm_accel_dropped_pkts;
32899+extern atomic_unchecked_t cm_resets_recvd;
4c928ab7
MT
32900+extern atomic_unchecked_t pau_qps_created;
32901+extern atomic_unchecked_t pau_qps_destroyed;
fe2de317
MT
32902
32903 extern u32 int_mod_timer_init;
32904 extern u32 int_mod_cq_depth_256;
32905diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32906index a4972ab..1bcfc31 100644
32907--- a/drivers/infiniband/hw/nes/nes_cm.c
32908+++ b/drivers/infiniband/hw/nes/nes_cm.c
32909@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32910 u32 cm_packets_retrans;
32911 u32 cm_packets_created;
32912 u32 cm_packets_received;
32913-atomic_t cm_listens_created;
32914-atomic_t cm_listens_destroyed;
32915+atomic_unchecked_t cm_listens_created;
32916+atomic_unchecked_t cm_listens_destroyed;
32917 u32 cm_backlog_drops;
32918-atomic_t cm_loopbacks;
32919-atomic_t cm_nodes_created;
32920-atomic_t cm_nodes_destroyed;
32921-atomic_t cm_accel_dropped_pkts;
32922-atomic_t cm_resets_recvd;
32923+atomic_unchecked_t cm_loopbacks;
32924+atomic_unchecked_t cm_nodes_created;
32925+atomic_unchecked_t cm_nodes_destroyed;
32926+atomic_unchecked_t cm_accel_dropped_pkts;
32927+atomic_unchecked_t cm_resets_recvd;
32928
32929 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32930 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32931@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32932
32933 static struct nes_cm_core *g_cm_core;
32934
32935-atomic_t cm_connects;
32936-atomic_t cm_accepts;
32937-atomic_t cm_disconnects;
32938-atomic_t cm_closes;
32939-atomic_t cm_connecteds;
32940-atomic_t cm_connect_reqs;
32941-atomic_t cm_rejects;
32942+atomic_unchecked_t cm_connects;
32943+atomic_unchecked_t cm_accepts;
32944+atomic_unchecked_t cm_disconnects;
32945+atomic_unchecked_t cm_closes;
32946+atomic_unchecked_t cm_connecteds;
32947+atomic_unchecked_t cm_connect_reqs;
32948+atomic_unchecked_t cm_rejects;
32949
32950 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32951 {
32952@@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32953 kfree(listener);
32954 listener = NULL;
32955 ret = 0;
32956- atomic_inc(&cm_listens_destroyed);
32957+ atomic_inc_unchecked(&cm_listens_destroyed);
32958 } else {
32959 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32960 }
32961@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32962 cm_node->rem_mac);
32963
32964 add_hte_node(cm_core, cm_node);
32965- atomic_inc(&cm_nodes_created);
32966+ atomic_inc_unchecked(&cm_nodes_created);
32967
32968 return cm_node;
32969 }
32970@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32971 }
32972
32973 atomic_dec(&cm_core->node_cnt);
32974- atomic_inc(&cm_nodes_destroyed);
32975+ atomic_inc_unchecked(&cm_nodes_destroyed);
32976 nesqp = cm_node->nesqp;
32977 if (nesqp) {
32978 nesqp->cm_node = NULL;
32979@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32980
32981 static void drop_packet(struct sk_buff *skb)
32982 {
32983- atomic_inc(&cm_accel_dropped_pkts);
32984+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32985 dev_kfree_skb_any(skb);
32986 }
32987
32988@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32989 {
32990
32991 int reset = 0; /* whether to send reset in case of err.. */
32992- atomic_inc(&cm_resets_recvd);
32993+ atomic_inc_unchecked(&cm_resets_recvd);
32994 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32995 " refcnt=%d\n", cm_node, cm_node->state,
32996 atomic_read(&cm_node->ref_count));
32997@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32998 rem_ref_cm_node(cm_node->cm_core, cm_node);
32999 return NULL;
33000 }
33001- atomic_inc(&cm_loopbacks);
33002+ atomic_inc_unchecked(&cm_loopbacks);
33003 loopbackremotenode->loopbackpartner = cm_node;
33004 loopbackremotenode->tcp_cntxt.rcv_wscale =
33005 NES_CM_DEFAULT_RCV_WND_SCALE;
33006@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
33007 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
33008 else {
33009 rem_ref_cm_node(cm_core, cm_node);
33010- atomic_inc(&cm_accel_dropped_pkts);
33011+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
33012 dev_kfree_skb_any(skb);
33013 }
33014 break;
33015@@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33016
33017 if ((cm_id) && (cm_id->event_handler)) {
33018 if (issue_disconn) {
33019- atomic_inc(&cm_disconnects);
33020+ atomic_inc_unchecked(&cm_disconnects);
33021 cm_event.event = IW_CM_EVENT_DISCONNECT;
33022 cm_event.status = disconn_status;
33023 cm_event.local_addr = cm_id->local_addr;
33024@@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33025 }
33026
33027 if (issue_close) {
33028- atomic_inc(&cm_closes);
33029+ atomic_inc_unchecked(&cm_closes);
33030 nes_disconnect(nesqp, 1);
33031
33032 cm_id->provider_data = nesqp;
33033@@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33034
33035 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
33036 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
33037- atomic_inc(&cm_accepts);
33038+ atomic_inc_unchecked(&cm_accepts);
33039
33040 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
33041 netdev_refcnt_read(nesvnic->netdev));
33042@@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
33043 struct nes_cm_core *cm_core;
33044 u8 *start_buff;
33045
33046- atomic_inc(&cm_rejects);
33047+ atomic_inc_unchecked(&cm_rejects);
33048 cm_node = (struct nes_cm_node *)cm_id->provider_data;
33049 loopback = cm_node->loopbackpartner;
33050 cm_core = cm_node->cm_core;
33051@@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33052 ntohl(cm_id->local_addr.sin_addr.s_addr),
33053 ntohs(cm_id->local_addr.sin_port));
33054
33055- atomic_inc(&cm_connects);
33056+ atomic_inc_unchecked(&cm_connects);
33057 nesqp->active_conn = 1;
33058
33059 /* cache the cm_id in the qp */
33060@@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
33061 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
33062 return err;
33063 }
33064- atomic_inc(&cm_listens_created);
33065+ atomic_inc_unchecked(&cm_listens_created);
33066 }
33067
33068 cm_id->add_ref(cm_id);
33069@@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
33070
33071 if (nesqp->destroyed)
33072 return;
33073- atomic_inc(&cm_connecteds);
33074+ atomic_inc_unchecked(&cm_connecteds);
33075 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
33076 " local port 0x%04X. jiffies = %lu.\n",
33077 nesqp->hwqp.qp_id,
33078@@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
33079
33080 cm_id->add_ref(cm_id);
33081 ret = cm_id->event_handler(cm_id, &cm_event);
33082- atomic_inc(&cm_closes);
33083+ atomic_inc_unchecked(&cm_closes);
33084 cm_event.event = IW_CM_EVENT_CLOSE;
33085 cm_event.status = 0;
33086 cm_event.provider_data = cm_id->provider_data;
33087@@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
33088 return;
33089 cm_id = cm_node->cm_id;
33090
33091- atomic_inc(&cm_connect_reqs);
33092+ atomic_inc_unchecked(&cm_connect_reqs);
33093 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33094 cm_node, cm_id, jiffies);
33095
33096@@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
33097 return;
33098 cm_id = cm_node->cm_id;
33099
33100- atomic_inc(&cm_connect_reqs);
33101+ atomic_inc_unchecked(&cm_connect_reqs);
33102 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33103 cm_node, cm_id, jiffies);
33104
33105diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
33106index 3ba7be3..c81f6ff 100644
33107--- a/drivers/infiniband/hw/nes/nes_mgt.c
33108+++ b/drivers/infiniband/hw/nes/nes_mgt.c
33109@@ -40,8 +40,8 @@
33110 #include "nes.h"
33111 #include "nes_mgt.h"
33112
33113-atomic_t pau_qps_created;
33114-atomic_t pau_qps_destroyed;
33115+atomic_unchecked_t pau_qps_created;
33116+atomic_unchecked_t pau_qps_destroyed;
33117
33118 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
33119 {
33120@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
33121 {
33122 struct sk_buff *skb;
33123 unsigned long flags;
33124- atomic_inc(&pau_qps_destroyed);
33125+ atomic_inc_unchecked(&pau_qps_destroyed);
33126
33127 /* Free packets that have not yet been forwarded */
33128 /* Lock is acquired by skb_dequeue when removing the skb */
33129@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
33130 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
33131 skb_queue_head_init(&nesqp->pau_list);
33132 spin_lock_init(&nesqp->pau_lock);
33133- atomic_inc(&pau_qps_created);
33134+ atomic_inc_unchecked(&pau_qps_created);
33135 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
33136 }
33137
33138diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
33139index f3a3ecf..57d311d 100644
33140--- a/drivers/infiniband/hw/nes/nes_nic.c
33141+++ b/drivers/infiniband/hw/nes/nes_nic.c
33142@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
33143 target_stat_values[++index] = mh_detected;
33144 target_stat_values[++index] = mh_pauses_sent;
33145 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
33146- target_stat_values[++index] = atomic_read(&cm_connects);
33147- target_stat_values[++index] = atomic_read(&cm_accepts);
33148- target_stat_values[++index] = atomic_read(&cm_disconnects);
33149- target_stat_values[++index] = atomic_read(&cm_connecteds);
33150- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
33151- target_stat_values[++index] = atomic_read(&cm_rejects);
33152- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
33153- target_stat_values[++index] = atomic_read(&qps_created);
33154- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
33155- target_stat_values[++index] = atomic_read(&qps_destroyed);
33156- target_stat_values[++index] = atomic_read(&cm_closes);
33157+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
33158+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
33159+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
33160+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
33161+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
33162+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
33163+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
33164+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
33165+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
33166+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
33167+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
33168 target_stat_values[++index] = cm_packets_sent;
33169 target_stat_values[++index] = cm_packets_bounced;
33170 target_stat_values[++index] = cm_packets_created;
33171 target_stat_values[++index] = cm_packets_received;
33172 target_stat_values[++index] = cm_packets_dropped;
33173 target_stat_values[++index] = cm_packets_retrans;
33174- target_stat_values[++index] = atomic_read(&cm_listens_created);
33175- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
33176+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
33177+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
33178 target_stat_values[++index] = cm_backlog_drops;
33179- target_stat_values[++index] = atomic_read(&cm_loopbacks);
33180- target_stat_values[++index] = atomic_read(&cm_nodes_created);
33181- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
33182- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
33183- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
33184+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
33185+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
33186+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
33187+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
33188+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
33189 target_stat_values[++index] = nesadapter->free_4kpbl;
33190 target_stat_values[++index] = nesadapter->free_256pbl;
33191 target_stat_values[++index] = int_mod_timer_init;
33192 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
33193 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
33194 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
33195- target_stat_values[++index] = atomic_read(&pau_qps_created);
33196- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
33197+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
33198+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
33199 }
33200
33201 /**
33202diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
33203index 0927b5c..ed67986 100644
33204--- a/drivers/infiniband/hw/nes/nes_verbs.c
33205+++ b/drivers/infiniband/hw/nes/nes_verbs.c
33206@@ -46,9 +46,9 @@
33207
33208 #include <rdma/ib_umem.h>
33209
33210-atomic_t mod_qp_timouts;
33211-atomic_t qps_created;
33212-atomic_t sw_qps_destroyed;
33213+atomic_unchecked_t mod_qp_timouts;
33214+atomic_unchecked_t qps_created;
33215+atomic_unchecked_t sw_qps_destroyed;
33216
33217 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
33218
33219@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
33220 if (init_attr->create_flags)
33221 return ERR_PTR(-EINVAL);
33222
33223- atomic_inc(&qps_created);
33224+ atomic_inc_unchecked(&qps_created);
33225 switch (init_attr->qp_type) {
33226 case IB_QPT_RC:
33227 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
33228@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
33229 struct iw_cm_event cm_event;
33230 int ret = 0;
33231
33232- atomic_inc(&sw_qps_destroyed);
33233+ atomic_inc_unchecked(&sw_qps_destroyed);
33234 nesqp->destroyed = 1;
33235
33236 /* Blow away the connection if it exists. */
33237diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
33238index b881bdc..c2e360c 100644
33239--- a/drivers/infiniband/hw/qib/qib.h
33240+++ b/drivers/infiniband/hw/qib/qib.h
33241@@ -51,6 +51,7 @@
33242 #include <linux/completion.h>
33243 #include <linux/kref.h>
33244 #include <linux/sched.h>
33245+#include <linux/slab.h>
33246
33247 #include "qib_common.h"
33248 #include "qib_verbs.h"
33249diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
33250index 05e0f17..0275789 100644
33251--- a/drivers/infiniband/hw/qib/qib_fs.c
33252+++ b/drivers/infiniband/hw/qib/qib_fs.c
33253@@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = {
33254 };
33255
33256 static ssize_t flash_read(struct file *file, char __user *buf,
33257+ size_t count, loff_t *ppos) __size_overflow(3);
33258+static ssize_t flash_read(struct file *file, char __user *buf,
33259 size_t count, loff_t *ppos)
33260 {
33261 struct qib_devdata *dd;
33262@@ -318,6 +320,8 @@ bail:
33263 }
33264
33265 static ssize_t flash_write(struct file *file, const char __user *buf,
33266+ size_t count, loff_t *ppos) __size_overflow(3);
33267+static ssize_t flash_write(struct file *file, const char __user *buf,
33268 size_t count, loff_t *ppos)
33269 {
33270 struct qib_devdata *dd;
33271diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
33272index c351aa4..e6967c2 100644
33273--- a/drivers/input/gameport/gameport.c
33274+++ b/drivers/input/gameport/gameport.c
33275@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
33276 */
33277 static void gameport_init_port(struct gameport *gameport)
33278 {
33279- static atomic_t gameport_no = ATOMIC_INIT(0);
33280+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
33281
33282 __module_get(THIS_MODULE);
33283
33284 mutex_init(&gameport->drv_mutex);
33285 device_initialize(&gameport->dev);
33286 dev_set_name(&gameport->dev, "gameport%lu",
33287- (unsigned long)atomic_inc_return(&gameport_no) - 1);
33288+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
33289 gameport->dev.bus = &gameport_bus;
33290 gameport->dev.release = gameport_release_port;
33291 if (gameport->parent)
33292diff --git a/drivers/input/input.c b/drivers/input/input.c
33293index 1f78c95..3cddc6c 100644
33294--- a/drivers/input/input.c
33295+++ b/drivers/input/input.c
33296@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
33297 */
33298 int input_register_device(struct input_dev *dev)
33299 {
33300- static atomic_t input_no = ATOMIC_INIT(0);
33301+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
33302 struct input_handler *handler;
33303 const char *path;
33304 int error;
fe2de317 33305@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
33306 dev->setkeycode = input_default_setkeycode;
33307
33308 dev_set_name(&dev->dev, "input%ld",
33309- (unsigned long) atomic_inc_return(&input_no) - 1);
33310+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
33311
33312 error = device_add(&dev->dev);
33313 if (error)
33314diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
33315index b8d8611..7a4a04b 100644
33316--- a/drivers/input/joystick/sidewinder.c
33317+++ b/drivers/input/joystick/sidewinder.c
33318@@ -30,6 +30,7 @@
33319 #include <linux/kernel.h>
33320 #include <linux/module.h>
33321 #include <linux/slab.h>
33322+#include <linux/sched.h>
33323 #include <linux/init.h>
33324 #include <linux/input.h>
33325 #include <linux/gameport.h>
33326diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
33327index fd7a0d5..a4af10c 100644
33328--- a/drivers/input/joystick/xpad.c
33329+++ b/drivers/input/joystick/xpad.c
33330@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
33331
33332 static int xpad_led_probe(struct usb_xpad *xpad)
33333 {
33334- static atomic_t led_seq = ATOMIC_INIT(0);
33335+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
33336 long led_no;
33337 struct xpad_led *led;
33338 struct led_classdev *led_cdev;
33339@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
33340 if (!led)
33341 return -ENOMEM;
33342
33343- led_no = (long)atomic_inc_return(&led_seq) - 1;
33344+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
33345
33346 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
33347 led->xpad = xpad;
33348diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
33349index 0110b5a..d3ad144 100644
33350--- a/drivers/input/mousedev.c
33351+++ b/drivers/input/mousedev.c
33352@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
33353
33354 spin_unlock_irq(&client->packet_lock);
33355
33356- if (copy_to_user(buffer, data, count))
33357+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
33358 return -EFAULT;
33359
33360 return count;
33361diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
33362index ba70058..571d25d 100644
33363--- a/drivers/input/serio/serio.c
33364+++ b/drivers/input/serio/serio.c
33365@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
33366 */
33367 static void serio_init_port(struct serio *serio)
33368 {
33369- static atomic_t serio_no = ATOMIC_INIT(0);
33370+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
33371
33372 __module_get(THIS_MODULE);
33373
33374@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
33375 mutex_init(&serio->drv_mutex);
33376 device_initialize(&serio->dev);
33377 dev_set_name(&serio->dev, "serio%ld",
33378- (long)atomic_inc_return(&serio_no) - 1);
33379+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
33380 serio->dev.bus = &serio_bus;
33381 serio->dev.release = serio_release_port;
33382 serio->dev.groups = serio_device_attr_groups;
33383diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
33384index e44933d..9ba484a 100644
33385--- a/drivers/isdn/capi/capi.c
33386+++ b/drivers/isdn/capi/capi.c
33387@@ -83,8 +83,8 @@ struct capiminor {
33388
33389 struct capi20_appl *ap;
33390 u32 ncci;
33391- atomic_t datahandle;
33392- atomic_t msgid;
33393+ atomic_unchecked_t datahandle;
33394+ atomic_unchecked_t msgid;
33395
33396 struct tty_port port;
33397 int ttyinstop;
33398@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
33399 capimsg_setu16(s, 2, mp->ap->applid);
33400 capimsg_setu8 (s, 4, CAPI_DATA_B3);
33401 capimsg_setu8 (s, 5, CAPI_RESP);
33402- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
33403+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
33404 capimsg_setu32(s, 8, mp->ncci);
33405 capimsg_setu16(s, 12, datahandle);
33406 }
33407@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
33408 mp->outbytes -= len;
33409 spin_unlock_bh(&mp->outlock);
33410
33411- datahandle = atomic_inc_return(&mp->datahandle);
33412+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
33413 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
33414 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33415 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33416 capimsg_setu16(skb->data, 2, mp->ap->applid);
33417 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33418 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33419- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33420+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33421 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33422 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33423 capimsg_setu16(skb->data, 16, len); /* Data length */
33424diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
33425index db621db..825ea1a 100644
33426--- a/drivers/isdn/gigaset/common.c
33427+++ b/drivers/isdn/gigaset/common.c
33428@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
33429 cs->commands_pending = 0;
33430 cs->cur_at_seq = 0;
33431 cs->gotfwver = -1;
33432- cs->open_count = 0;
33433+ local_set(&cs->open_count, 0);
33434 cs->dev = NULL;
33435 cs->tty = NULL;
33436 cs->tty_dev = NULL;
33437diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
33438index 212efaf..f187c6b 100644
33439--- a/drivers/isdn/gigaset/gigaset.h
33440+++ b/drivers/isdn/gigaset/gigaset.h
33441@@ -35,6 +35,7 @@
33442 #include <linux/tty_driver.h>
33443 #include <linux/list.h>
33444 #include <linux/atomic.h>
33445+#include <asm/local.h>
33446
33447 #define GIG_VERSION {0, 5, 0, 0}
33448 #define GIG_COMPAT {0, 4, 0, 0}
33449@@ -433,7 +434,7 @@ struct cardstate {
33450 spinlock_t cmdlock;
33451 unsigned curlen, cmdbytes;
33452
33453- unsigned open_count;
33454+ local_t open_count;
33455 struct tty_struct *tty;
33456 struct tasklet_struct if_wake_tasklet;
33457 unsigned control_state;
fe2de317 33458diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
4c928ab7 33459index ee0a549..a7c9798 100644
fe2de317
MT
33460--- a/drivers/isdn/gigaset/interface.c
33461+++ b/drivers/isdn/gigaset/interface.c
4c928ab7 33462@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
15a11c5b 33463 }
58c5fc13
MT
33464 tty->driver_data = cs;
33465
33466- ++cs->open_count;
33467-
33468- if (cs->open_count == 1) {
33469+ if (local_inc_return(&cs->open_count) == 1) {
33470 spin_lock_irqsave(&cs->lock, flags);
33471 cs->tty = tty;
33472 spin_unlock_irqrestore(&cs->lock, flags);
33473@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33474
33475 if (!cs->connected)
33476 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33477- else if (!cs->open_count)
33478+ else if (!local_read(&cs->open_count))
33479 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33480 else {
33481- if (!--cs->open_count) {
33482+ if (!local_dec_return(&cs->open_count)) {
33483 spin_lock_irqsave(&cs->lock, flags);
33484 cs->tty = NULL;
33485 spin_unlock_irqrestore(&cs->lock, flags);
33486@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
33487 if (!cs->connected) {
33488 gig_dbg(DEBUG_IF, "not connected");
33489 retval = -ENODEV;
33490- } else if (!cs->open_count)
33491+ } else if (!local_read(&cs->open_count))
33492 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33493 else {
33494 retval = 0;
33495@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
33496 retval = -ENODEV;
33497 goto done;
33498 }
33499- if (!cs->open_count) {
33500+ if (!local_read(&cs->open_count)) {
33501 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33502 retval = -ENODEV;
33503 goto done;
33504@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
33505 if (!cs->connected) {
33506 gig_dbg(DEBUG_IF, "not connected");
33507 retval = -ENODEV;
33508- } else if (!cs->open_count)
33509+ } else if (!local_read(&cs->open_count))
33510 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33511 else if (cs->mstate != MS_LOCKED) {
33512 dev_warn(cs->dev, "can't write to unlocked device\n");
33513@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
33514
33515 if (!cs->connected)
33516 gig_dbg(DEBUG_IF, "not connected");
33517- else if (!cs->open_count)
33518+ else if (!local_read(&cs->open_count))
33519 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33520 else if (cs->mstate != MS_LOCKED)
33521 dev_warn(cs->dev, "can't write to unlocked device\n");
33522@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
33523
33524 if (!cs->connected)
33525 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33526- else if (!cs->open_count)
33527+ else if (!local_read(&cs->open_count))
33528 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33529 else
33530 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33531@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
33532
33533 if (!cs->connected)
33534 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33535- else if (!cs->open_count)
33536+ else if (!local_read(&cs->open_count))
33537 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33538 else
33539 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33540@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
33541 goto out;
33542 }
33543
33544- if (!cs->open_count) {
33545+ if (!local_read(&cs->open_count)) {
33546 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33547 goto out;
33548 }
33549diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33550index 2a57da59..e7a12ed 100644
33551--- a/drivers/isdn/hardware/avm/b1.c
33552+++ b/drivers/isdn/hardware/avm/b1.c
33553@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
33554 }
33555 if (left) {
33556 if (t4file->user) {
33557- if (copy_from_user(buf, dp, left))
33558+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33559 return -EFAULT;
33560 } else {
33561 memcpy(buf, dp, left);
33562@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
33563 }
33564 if (left) {
33565 if (config->user) {
33566- if (copy_from_user(buf, dp, left))
33567+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33568 return -EFAULT;
33569 } else {
33570 memcpy(buf, dp, left);
33571diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33572index 85784a7..a19ca98 100644
33573--- a/drivers/isdn/hardware/eicon/divasync.h
33574+++ b/drivers/isdn/hardware/eicon/divasync.h
33575@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33576 } diva_didd_add_adapter_t;
33577 typedef struct _diva_didd_remove_adapter {
33578 IDI_CALL p_request;
33579-} diva_didd_remove_adapter_t;
33580+} __no_const diva_didd_remove_adapter_t;
33581 typedef struct _diva_didd_read_adapter_array {
33582 void * buffer;
33583 dword length;
33584diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33585index a3bd163..8956575 100644
33586--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33587+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33588@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33589 typedef struct _diva_os_idi_adapter_interface {
33590 diva_init_card_proc_t cleanup_adapter_proc;
33591 diva_cmd_card_proc_t cmd_proc;
33592-} diva_os_idi_adapter_interface_t;
33593+} __no_const diva_os_idi_adapter_interface_t;
33594
33595 typedef struct _diva_os_xdi_adapter {
33596 struct list_head link;
33597diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33598index 1f355bb..43f1fea 100644
33599--- a/drivers/isdn/icn/icn.c
33600+++ b/drivers/isdn/icn/icn.c
33601@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
33602 if (count > len)
33603 count = len;
33604 if (user) {
33605- if (copy_from_user(msg, buf, count))
33606+ if (count > sizeof msg || copy_from_user(msg, buf, count))
33607 return -EFAULT;
33608 } else
33609 memcpy(msg, buf, count);
33610diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33611index b5fdcb7..5b6c59f 100644
33612--- a/drivers/lguest/core.c
33613+++ b/drivers/lguest/core.c
33614@@ -92,9 +92,17 @@ static __init int map_switcher(void)
33615 * it's worked so far. The end address needs +1 because __get_vm_area
33616 * allocates an extra guard page, so we need space for that.
33617 */
33618+
33619+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33620+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33621+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33622+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33623+#else
33624 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33625 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33626 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33627+#endif
33628+
33629 if (!switcher_vma) {
33630 err = -ENOMEM;
33631 printk("lguest: could not map switcher pages high\n");
33632@@ -119,7 +127,7 @@ static __init int map_switcher(void)
33633 * Now the Switcher is mapped at the right address, we can't fail!
33634 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33635 */
33636- memcpy(switcher_vma->addr, start_switcher_text,
33637+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33638 end_switcher_text - start_switcher_text);
33639
33640 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33641diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
33642index ff4a0bc..f5fdd9c 100644
33643--- a/drivers/lguest/lguest_user.c
33644+++ b/drivers/lguest/lguest_user.c
33645@@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
33646 * Once our Guest is initialized, the Launcher makes it run by reading
33647 * from /dev/lguest.
33648 */
33649+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
33650 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
33651 {
33652 struct lguest *lg = file->private_data;
33653diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33654index 3980903..ce25c5e 100644
33655--- a/drivers/lguest/x86/core.c
33656+++ b/drivers/lguest/x86/core.c
33657@@ -59,7 +59,7 @@ static struct {
33658 /* Offset from where switcher.S was compiled to where we've copied it */
33659 static unsigned long switcher_offset(void)
33660 {
33661- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33662+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33663 }
33664
33665 /* This cpu's struct lguest_pages. */
33666@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33667 * These copies are pretty cheap, so we do them unconditionally: */
33668 /* Save the current Host top-level page directory.
33669 */
33670+
33671+#ifdef CONFIG_PAX_PER_CPU_PGD
33672+ pages->state.host_cr3 = read_cr3();
33673+#else
33674 pages->state.host_cr3 = __pa(current->mm->pgd);
33675+#endif
33676+
33677 /*
33678 * Set up the Guest's page tables to see this CPU's pages (and no
33679 * other CPU's pages).
33680@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33681 * compiled-in switcher code and the high-mapped copy we just made.
33682 */
33683 for (i = 0; i < IDT_ENTRIES; i++)
33684- default_idt_entries[i] += switcher_offset();
33685+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33686
33687 /*
33688 * Set up the Switcher's per-cpu areas.
33689@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33690 * it will be undisturbed when we switch. To change %cs and jump we
33691 * need this structure to feed to Intel's "lcall" instruction.
33692 */
33693- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33694+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33695 lguest_entry.segment = LGUEST_CS;
33696
33697 /*
33698diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33699index 40634b0..4f5855e 100644
33700--- a/drivers/lguest/x86/switcher_32.S
33701+++ b/drivers/lguest/x86/switcher_32.S
33702@@ -87,6 +87,7 @@
33703 #include <asm/page.h>
33704 #include <asm/segment.h>
33705 #include <asm/lguest.h>
33706+#include <asm/processor-flags.h>
33707
33708 // We mark the start of the code to copy
33709 // It's placed in .text tho it's never run here
33710@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33711 // Changes type when we load it: damn Intel!
33712 // For after we switch over our page tables
33713 // That entry will be read-only: we'd crash.
33714+
33715+#ifdef CONFIG_PAX_KERNEXEC
33716+ mov %cr0, %edx
33717+ xor $X86_CR0_WP, %edx
33718+ mov %edx, %cr0
33719+#endif
33720+
33721 movl $(GDT_ENTRY_TSS*8), %edx
33722 ltr %dx
33723
33724@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33725 // Let's clear it again for our return.
33726 // The GDT descriptor of the Host
33727 // Points to the table after two "size" bytes
33728- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33729+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33730 // Clear "used" from type field (byte 5, bit 2)
33731- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33732+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33733+
33734+#ifdef CONFIG_PAX_KERNEXEC
33735+ mov %cr0, %eax
33736+ xor $X86_CR0_WP, %eax
33737+ mov %eax, %cr0
33738+#endif
33739
33740 // Once our page table's switched, the Guest is live!
33741 // The Host fades as we run this final step.
33742@@ -295,13 +309,12 @@ deliver_to_host:
33743 // I consulted gcc, and it gave
33744 // These instructions, which I gladly credit:
33745 leal (%edx,%ebx,8), %eax
33746- movzwl (%eax),%edx
33747- movl 4(%eax), %eax
33748- xorw %ax, %ax
33749- orl %eax, %edx
33750+ movl 4(%eax), %edx
33751+ movw (%eax), %dx
33752 // Now the address of the handler's in %edx
33753 // We call it now: its "iret" drops us home.
33754- jmp *%edx
33755+ ljmp $__KERNEL_CS, $1f
33756+1: jmp *%edx
33757
33758 // Every interrupt can come to us here
33759 // But we must truly tell each apart.
33760diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33761index 4daf9e5..b8d1d0f 100644
33762--- a/drivers/macintosh/macio_asic.c
33763+++ b/drivers/macintosh/macio_asic.c
33764@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33765 * MacIO is matched against any Apple ID, it's probe() function
33766 * will then decide wether it applies or not
33767 */
33768-static const struct pci_device_id __devinitdata pci_ids [] = { {
33769+static const struct pci_device_id __devinitconst pci_ids [] = { {
33770 .vendor = PCI_VENDOR_ID_APPLE,
33771 .device = PCI_ANY_ID,
33772 .subvendor = PCI_ANY_ID,
33773diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33774index 1ce84ed..0fdd40a 100644
33775--- a/drivers/md/dm-ioctl.c
33776+++ b/drivers/md/dm-ioctl.c
33777@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33778 cmd == DM_LIST_VERSIONS_CMD)
33779 return 0;
33780
33781- if ((cmd == DM_DEV_CREATE_CMD)) {
33782+ if (cmd == DM_DEV_CREATE_CMD) {
33783 if (!*param->name) {
33784 DMWARN("name not supplied when creating device");
33785 return -EINVAL;
5e856224
MT
33786diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
33787index 1f23e04..08d9a20 100644
33788--- a/drivers/md/dm-log-userspace-transfer.c
33789+++ b/drivers/md/dm-log-userspace-transfer.c
33790@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
33791 {
33792 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
33793
33794- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
33795+ if (!capable(CAP_SYS_ADMIN))
33796 return;
33797
33798 spin_lock(&receiving_list_lock);
fe2de317 33799diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
5e856224 33800index 9bfd057..5373ff3 100644
fe2de317
MT
33801--- a/drivers/md/dm-raid1.c
33802+++ b/drivers/md/dm-raid1.c
15a11c5b 33803@@ -40,7 +40,7 @@ enum dm_raid1_error {
8308f9c9
MT
33804
33805 struct mirror {
33806 struct mirror_set *ms;
33807- atomic_t error_count;
33808+ atomic_unchecked_t error_count;
33809 unsigned long error_type;
33810 struct dm_dev *dev;
33811 sector_t offset;
fe2de317 33812@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
8308f9c9
MT
33813 struct mirror *m;
33814
33815 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33816- if (!atomic_read(&m->error_count))
33817+ if (!atomic_read_unchecked(&m->error_count))
33818 return m;
33819
33820 return NULL;
fe2de317 33821@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
8308f9c9
MT
33822 * simple way to tell if a device has encountered
33823 * errors.
33824 */
33825- atomic_inc(&m->error_count);
33826+ atomic_inc_unchecked(&m->error_count);
33827
33828 if (test_and_set_bit(error_type, &m->error_type))
33829 return;
fe2de317 33830@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
8308f9c9
MT
33831 struct mirror *m = get_default_mirror(ms);
33832
33833 do {
33834- if (likely(!atomic_read(&m->error_count)))
33835+ if (likely(!atomic_read_unchecked(&m->error_count)))
33836 return m;
33837
33838 if (m-- == ms->mirror)
15a11c5b 33839@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
8308f9c9
MT
33840 {
33841 struct mirror *default_mirror = get_default_mirror(m->ms);
33842
33843- return !atomic_read(&default_mirror->error_count);
33844+ return !atomic_read_unchecked(&default_mirror->error_count);
33845 }
33846
33847 static int mirror_available(struct mirror_set *ms, struct bio *bio)
fe2de317 33848@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
8308f9c9
MT
33849 */
33850 if (likely(region_in_sync(ms, region, 1)))
33851 m = choose_mirror(ms, bio->bi_sector);
33852- else if (m && atomic_read(&m->error_count))
33853+ else if (m && atomic_read_unchecked(&m->error_count))
33854 m = NULL;
33855
33856 if (likely(m))
5e856224
MT
33857@@ -848,6 +848,10 @@ static void do_mirror(struct work_struct *work)
33858 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33859 uint32_t region_size,
33860 struct dm_target *ti,
33861+ struct dm_dirty_log *dl) __size_overflow(1);
33862+static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33863+ uint32_t region_size,
33864+ struct dm_target *ti,
33865 struct dm_dirty_log *dl)
33866 {
33867 size_t len;
33868@@ -937,7 +941,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
8308f9c9
MT
33869 }
33870
33871 ms->mirror[mirror].ms = ms;
33872- atomic_set(&(ms->mirror[mirror].error_count), 0);
33873+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33874 ms->mirror[mirror].error_type = 0;
33875 ms->mirror[mirror].offset = offset;
33876
5e856224 33877@@ -1347,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
8308f9c9
MT
33878 */
33879 static char device_status_char(struct mirror *m)
33880 {
33881- if (!atomic_read(&(m->error_count)))
33882+ if (!atomic_read_unchecked(&(m->error_count)))
33883 return 'A';
33884
33885 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
fe2de317 33886diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
5e856224 33887index 3d80cf0..7d98e1a 100644
fe2de317
MT
33888--- a/drivers/md/dm-stripe.c
33889+++ b/drivers/md/dm-stripe.c
8308f9c9
MT
33890@@ -20,7 +20,7 @@ struct stripe {
33891 struct dm_dev *dev;
33892 sector_t physical_start;
33893
33894- atomic_t error_count;
33895+ atomic_unchecked_t error_count;
33896 };
33897
33898 struct stripe_c {
5e856224
MT
33899@@ -55,6 +55,7 @@ static void trigger_event(struct work_struct *work)
33900 dm_table_event(sc->ti->table);
33901 }
33902
33903+static inline struct stripe_c *alloc_context(unsigned int stripes) __size_overflow(1);
33904 static inline struct stripe_c *alloc_context(unsigned int stripes)
33905 {
33906 size_t len;
33907@@ -192,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
8308f9c9
MT
33908 kfree(sc);
33909 return r;
33910 }
33911- atomic_set(&(sc->stripe[i].error_count), 0);
33912+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33913 }
33914
33915 ti->private = sc;
5e856224 33916@@ -314,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
8308f9c9
MT
33917 DMEMIT("%d ", sc->stripes);
33918 for (i = 0; i < sc->stripes; i++) {
33919 DMEMIT("%s ", sc->stripe[i].dev->name);
33920- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33921+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33922 'D' : 'A';
33923 }
33924 buffer[i] = '\0';
5e856224 33925@@ -361,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
8308f9c9
MT
33926 */
33927 for (i = 0; i < sc->stripes; i++)
33928 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33929- atomic_inc(&(sc->stripe[i].error_count));
33930- if (atomic_read(&(sc->stripe[i].error_count)) <
33931+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33932+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33933 DM_IO_ERROR_THRESHOLD)
33934 schedule_work(&sc->trigger_event);
33935 }
fe2de317 33936diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
5e856224 33937index 63cc542..8d45caf3 100644
fe2de317
MT
33938--- a/drivers/md/dm-table.c
33939+++ b/drivers/md/dm-table.c
4c928ab7 33940@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
58c5fc13
MT
33941 if (!dev_size)
33942 return 0;
33943
33944- if ((start >= dev_size) || (start + len > dev_size)) {
33945+ if ((start >= dev_size) || (len > dev_size - start)) {
33946 DMWARN("%s: %s too small for target: "
33947 "start=%llu, len=%llu, dev_size=%llu",
33948 dm_device_name(ti->table->md), bdevname(bdev, b),
4c928ab7
MT
33949diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33950index 237571a..fb6d19b 100644
33951--- a/drivers/md/dm-thin-metadata.c
33952+++ b/drivers/md/dm-thin-metadata.c
33953@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33954
33955 pmd->info.tm = tm;
33956 pmd->info.levels = 2;
33957- pmd->info.value_type.context = pmd->data_sm;
33958+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33959 pmd->info.value_type.size = sizeof(__le64);
33960 pmd->info.value_type.inc = data_block_inc;
33961 pmd->info.value_type.dec = data_block_dec;
33962@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33963
33964 pmd->bl_info.tm = tm;
33965 pmd->bl_info.levels = 1;
33966- pmd->bl_info.value_type.context = pmd->data_sm;
33967+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33968 pmd->bl_info.value_type.size = sizeof(__le64);
33969 pmd->bl_info.value_type.inc = data_block_inc;
33970 pmd->bl_info.value_type.dec = data_block_dec;
fe2de317 33971diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5e856224 33972index b89c548..2af3ce4 100644
fe2de317
MT
33973--- a/drivers/md/dm.c
33974+++ b/drivers/md/dm.c
5e856224 33975@@ -176,9 +176,9 @@ struct mapped_device {
fe2de317
MT
33976 /*
33977 * Event handling.
33978 */
33979- atomic_t event_nr;
33980+ atomic_unchecked_t event_nr;
33981 wait_queue_head_t eventq;
33982- atomic_t uevent_seq;
33983+ atomic_unchecked_t uevent_seq;
33984 struct list_head uevent_list;
33985 spinlock_t uevent_lock; /* Protect access to uevent_list */
33986
5e856224 33987@@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
fe2de317
MT
33988 rwlock_init(&md->map_lock);
33989 atomic_set(&md->holders, 1);
33990 atomic_set(&md->open_count, 0);
33991- atomic_set(&md->event_nr, 0);
33992- atomic_set(&md->uevent_seq, 0);
33993+ atomic_set_unchecked(&md->event_nr, 0);
33994+ atomic_set_unchecked(&md->uevent_seq, 0);
33995 INIT_LIST_HEAD(&md->uevent_list);
33996 spin_lock_init(&md->uevent_lock);
33997
5e856224 33998@@ -1979,7 +1979,7 @@ static void event_callback(void *context)
fe2de317
MT
33999
34000 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
34001
34002- atomic_inc(&md->event_nr);
34003+ atomic_inc_unchecked(&md->event_nr);
34004 wake_up(&md->eventq);
34005 }
34006
5e856224 34007@@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
fe2de317
MT
34008
34009 uint32_t dm_next_uevent_seq(struct mapped_device *md)
34010 {
34011- return atomic_add_return(1, &md->uevent_seq);
34012+ return atomic_add_return_unchecked(1, &md->uevent_seq);
34013 }
34014
34015 uint32_t dm_get_event_nr(struct mapped_device *md)
34016 {
34017- return atomic_read(&md->event_nr);
34018+ return atomic_read_unchecked(&md->event_nr);
34019 }
34020
34021 int dm_wait_event(struct mapped_device *md, int event_nr)
34022 {
34023 return wait_event_interruptible(md->eventq,
34024- (event_nr != atomic_read(&md->event_nr)));
34025+ (event_nr != atomic_read_unchecked(&md->event_nr)));
34026 }
34027
34028 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
34029diff --git a/drivers/md/md.c b/drivers/md/md.c
5e856224 34030index 58027d8..d9cddcd 100644
fe2de317
MT
34031--- a/drivers/md/md.c
34032+++ b/drivers/md/md.c
5e856224 34033@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
8308f9c9
MT
34034 * start build, activate spare
34035 */
34036 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
34037-static atomic_t md_event_count;
34038+static atomic_unchecked_t md_event_count;
4c928ab7 34039 void md_new_event(struct mddev *mddev)
8308f9c9
MT
34040 {
34041- atomic_inc(&md_event_count);
34042+ atomic_inc_unchecked(&md_event_count);
34043 wake_up(&md_event_waiters);
34044 }
34045 EXPORT_SYMBOL_GPL(md_new_event);
5e856224 34046@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
8308f9c9 34047 */
4c928ab7 34048 static void md_new_event_inintr(struct mddev *mddev)
8308f9c9
MT
34049 {
34050- atomic_inc(&md_event_count);
34051+ atomic_inc_unchecked(&md_event_count);
34052 wake_up(&md_event_waiters);
34053 }
34054
5e856224 34055@@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
8308f9c9
MT
34056
34057 rdev->preferred_minor = 0xffff;
34058 rdev->data_offset = le64_to_cpu(sb->data_offset);
34059- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34060+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34061
34062 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
34063 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
5e856224 34064@@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
34065 else
34066 sb->resync_offset = cpu_to_le64(0);
34067
34068- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
34069+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
34070
34071 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
34072 sb->size = cpu_to_le64(mddev->dev_sectors);
5e856224 34073@@ -2689,7 +2689,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
8308f9c9 34074 static ssize_t
4c928ab7 34075 errors_show(struct md_rdev *rdev, char *page)
8308f9c9
MT
34076 {
34077- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
34078+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
34079 }
34080
34081 static ssize_t
5e856224 34082@@ -2698,7 +2698,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
8308f9c9
MT
34083 char *e;
34084 unsigned long n = simple_strtoul(buf, &e, 10);
34085 if (*buf && (*e == 0 || *e == '\n')) {
34086- atomic_set(&rdev->corrected_errors, n);
34087+ atomic_set_unchecked(&rdev->corrected_errors, n);
34088 return len;
34089 }
34090 return -EINVAL;
5e856224 34091@@ -3084,8 +3084,8 @@ int md_rdev_init(struct md_rdev *rdev)
6e9df6a3
MT
34092 rdev->sb_loaded = 0;
34093 rdev->bb_page = NULL;
8308f9c9
MT
34094 atomic_set(&rdev->nr_pending, 0);
34095- atomic_set(&rdev->read_errors, 0);
34096- atomic_set(&rdev->corrected_errors, 0);
34097+ atomic_set_unchecked(&rdev->read_errors, 0);
34098+ atomic_set_unchecked(&rdev->corrected_errors, 0);
34099
34100 INIT_LIST_HEAD(&rdev->same_set);
34101 init_waitqueue_head(&rdev->blocked_wait);
5e856224 34102@@ -6736,7 +6736,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
34103
34104 spin_unlock(&pers_lock);
34105 seq_printf(seq, "\n");
6e9df6a3
MT
34106- seq->poll_event = atomic_read(&md_event_count);
34107+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
34108 return 0;
34109 }
34110 if (v == (void*)2) {
5e856224 34111@@ -6828,7 +6828,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
58c5fc13
MT
34112 chunk_kb ? "KB" : "B");
34113 if (bitmap->file) {
34114 seq_printf(seq, ", file: ");
34115- seq_path(seq, &bitmap->file->f_path, " \t\n");
34116+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
34117 }
34118
34119 seq_printf(seq, "\n");
5e856224 34120@@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
6e9df6a3
MT
34121 return error;
34122
34123 seq = file->private_data;
34124- seq->poll_event = atomic_read(&md_event_count);
34125+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
34126 return error;
34127 }
6e9df6a3 34128
5e856224 34129@@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
34130 /* always allow read */
34131 mask = POLLIN | POLLRDNORM;
34132
6e9df6a3
MT
34133- if (seq->poll_event != atomic_read(&md_event_count))
34134+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
8308f9c9
MT
34135 mask |= POLLERR | POLLPRI;
34136 return mask;
34137 }
5e856224 34138@@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
58c5fc13
MT
34139 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
34140 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
34141 (int)part_stat_read(&disk->part0, sectors[1]) -
34142- atomic_read(&disk->sync_io);
34143+ atomic_read_unchecked(&disk->sync_io);
34144 /* sync IO will cause sync_io to increase before the disk_stats
34145 * as sync_io is counted when a request starts, and
34146 * disk_stats is counted when it completes.
fe2de317 34147diff --git a/drivers/md/md.h b/drivers/md/md.h
5e856224 34148index 44c63df..b795d1a 100644
fe2de317
MT
34149--- a/drivers/md/md.h
34150+++ b/drivers/md/md.h
5e856224 34151@@ -93,13 +93,13 @@ struct md_rdev {
8308f9c9
MT
34152 * only maintained for arrays that
34153 * support hot removal
34154 */
34155- atomic_t read_errors; /* number of consecutive read errors that
34156+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
34157 * we have tried to ignore.
34158 */
34159 struct timespec last_read_error; /* monotonic time since our
34160 * last read error
34161 */
34162- atomic_t corrected_errors; /* number of corrected read errors,
34163+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
34164 * for reporting to userspace and storing
34165 * in superblock.
34166 */
5e856224 34167@@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
58c5fc13
MT
34168
34169 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
34170 {
34171- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34172+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34173 }
34174
4c928ab7
MT
34175 struct md_personality
34176diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
34177index 50ed53b..4f29d7d 100644
34178--- a/drivers/md/persistent-data/dm-space-map-checker.c
34179+++ b/drivers/md/persistent-data/dm-space-map-checker.c
34180@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
34181 /*----------------------------------------------------------------*/
34182
34183 struct sm_checker {
34184- struct dm_space_map sm;
34185+ dm_space_map_no_const sm;
34186
34187 struct count_array old_counts;
34188 struct count_array counts;
34189diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
34190index fc469ba..2d91555 100644
34191--- a/drivers/md/persistent-data/dm-space-map-disk.c
34192+++ b/drivers/md/persistent-data/dm-space-map-disk.c
34193@@ -23,7 +23,7 @@
34194 * Space map interface.
34195 */
34196 struct sm_disk {
34197- struct dm_space_map sm;
34198+ dm_space_map_no_const sm;
34199
34200 struct ll_disk ll;
34201 struct ll_disk old_ll;
34202diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
34203index e89ae5e..062e4c2 100644
34204--- a/drivers/md/persistent-data/dm-space-map-metadata.c
34205+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
34206@@ -43,7 +43,7 @@ struct block_op {
34207 };
34208
34209 struct sm_metadata {
34210- struct dm_space_map sm;
34211+ dm_space_map_no_const sm;
34212
34213 struct ll_disk ll;
34214 struct ll_disk old_ll;
34215diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
34216index 1cbfc6b..56e1dbb 100644
34217--- a/drivers/md/persistent-data/dm-space-map.h
34218+++ b/drivers/md/persistent-data/dm-space-map.h
34219@@ -60,6 +60,7 @@ struct dm_space_map {
34220 int (*root_size)(struct dm_space_map *sm, size_t *result);
34221 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
34222 };
34223+typedef struct dm_space_map __no_const dm_space_map_no_const;
34224
34225 /*----------------------------------------------------------------*/
34226
fe2de317 34227diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
5e856224 34228index edc735a..e9b97f1 100644
fe2de317
MT
34229--- a/drivers/md/raid1.c
34230+++ b/drivers/md/raid1.c
5e856224 34231@@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
fe2de317
MT
34232 if (r1_sync_page_io(rdev, sect, s,
34233 bio->bi_io_vec[idx].bv_page,
34234 READ) != 0)
34235- atomic_add(s, &rdev->corrected_errors);
34236+ atomic_add_unchecked(s, &rdev->corrected_errors);
34237 }
34238 sectors -= s;
34239 sect += s;
5e856224 34240@@ -1859,7 +1859,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
fe2de317
MT
34241 test_bit(In_sync, &rdev->flags)) {
34242 if (r1_sync_page_io(rdev, sect, s,
34243 conf->tmppage, READ)) {
34244- atomic_add(s, &rdev->corrected_errors);
34245+ atomic_add_unchecked(s, &rdev->corrected_errors);
34246 printk(KERN_INFO
34247 "md/raid1:%s: read error corrected "
34248 "(%d sectors at %llu on %s)\n",
34249diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
5e856224 34250index 1898389..a3aa617 100644
fe2de317
MT
34251--- a/drivers/md/raid10.c
34252+++ b/drivers/md/raid10.c
5e856224 34253@@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
6e9df6a3
MT
34254 /* The write handler will notice the lack of
34255 * R10BIO_Uptodate and record any errors etc
34256 */
8308f9c9
MT
34257- atomic_add(r10_bio->sectors,
34258+ atomic_add_unchecked(r10_bio->sectors,
34259 &conf->mirrors[d].rdev->corrected_errors);
6e9df6a3
MT
34260
34261 /* for reconstruct, we always reschedule after a read.
5e856224 34262@@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
34263 {
34264 struct timespec cur_time_mon;
34265 unsigned long hours_since_last;
34266- unsigned int read_errors = atomic_read(&rdev->read_errors);
34267+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
34268
34269 ktime_get_ts(&cur_time_mon);
34270
5e856224 34271@@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
34272 * overflowing the shift of read_errors by hours_since_last.
34273 */
34274 if (hours_since_last >= 8 * sizeof(read_errors))
34275- atomic_set(&rdev->read_errors, 0);
34276+ atomic_set_unchecked(&rdev->read_errors, 0);
34277 else
34278- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
34279+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
34280 }
34281
4c928ab7 34282 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
5e856224 34283@@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
15a11c5b 34284 return;
8308f9c9 34285
15a11c5b
MT
34286 check_decay_read_errors(mddev, rdev);
34287- atomic_inc(&rdev->read_errors);
34288- if (atomic_read(&rdev->read_errors) > max_read_errors) {
34289+ atomic_inc_unchecked(&rdev->read_errors);
34290+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
34291 char b[BDEVNAME_SIZE];
34292 bdevname(rdev->bdev, b);
34293
5e856224 34294@@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
15a11c5b
MT
34295 "md/raid10:%s: %s: Raid device exceeded "
34296 "read_error threshold [cur %d:max %d]\n",
34297 mdname(mddev), b,
34298- atomic_read(&rdev->read_errors), max_read_errors);
34299+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
34300 printk(KERN_NOTICE
34301 "md/raid10:%s: %s: Failing raid device\n",
34302 mdname(mddev), b);
5e856224 34303@@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
6e9df6a3
MT
34304 (unsigned long long)(
34305 sect + rdev->data_offset),
34306 bdevname(rdev->bdev, b));
8308f9c9
MT
34307- atomic_add(s, &rdev->corrected_errors);
34308+ atomic_add_unchecked(s, &rdev->corrected_errors);
6e9df6a3
MT
34309 }
34310
34311 rdev_dec_pending(rdev, mddev);
fe2de317 34312diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
5e856224 34313index d1162e5..c7cd902 100644
fe2de317
MT
34314--- a/drivers/md/raid5.c
34315+++ b/drivers/md/raid5.c
5e856224 34316@@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
34317 (unsigned long long)(sh->sector
34318 + rdev->data_offset),
34319 bdevname(rdev->bdev, b));
34320- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
34321+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
8308f9c9
MT
34322 clear_bit(R5_ReadError, &sh->dev[i].flags);
34323 clear_bit(R5_ReWrite, &sh->dev[i].flags);
34324 }
5e856224
MT
34325- if (atomic_read(&rdev->read_errors))
34326- atomic_set(&rdev->read_errors, 0);
34327+ if (atomic_read_unchecked(&rdev->read_errors))
34328+ atomic_set_unchecked(&rdev->read_errors, 0);
8308f9c9 34329 } else {
5e856224 34330 const char *bdn = bdevname(rdev->bdev, b);
8308f9c9 34331 int retry = 0;
8308f9c9
MT
34332
34333 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
34334- atomic_inc(&rdev->read_errors);
34335+ atomic_inc_unchecked(&rdev->read_errors);
5e856224 34336 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
6e9df6a3
MT
34337 printk_ratelimited(
34338 KERN_WARNING
5e856224 34339@@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
34340 (unsigned long long)(sh->sector
34341 + rdev->data_offset),
34342 bdn);
8308f9c9
MT
34343- else if (atomic_read(&rdev->read_errors)
34344+ else if (atomic_read_unchecked(&rdev->read_errors)
34345 > conf->max_nr_stripes)
34346 printk(KERN_WARNING
34347 "md/raid:%s: Too many read errors, failing device %s.\n",
fe2de317 34348diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
5e856224 34349index ce4f858..7bcfb46 100644
fe2de317
MT
34350--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
34351+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
4c928ab7 34352@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
6e9df6a3
MT
34353 .subvendor = _subvend, .subdevice = _subdev, \
34354 .driver_data = (unsigned long)&_driverdata }
34355
34356-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
34357+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
34358 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
34359 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
34360 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
fe2de317
MT
34361diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
34362index a7d876f..8c21b61 100644
34363--- a/drivers/media/dvb/dvb-core/dvb_demux.h
34364+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
6e9df6a3 34365@@ -73,7 +73,7 @@ struct dvb_demux_feed {
15a11c5b
MT
34366 union {
34367 dmx_ts_cb ts;
34368 dmx_section_cb sec;
34369- } cb;
34370+ } __no_const cb;
34371
34372 struct dvb_demux *demux;
34373 void *priv;
fe2de317 34374diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
5e856224 34375index 00a6732..70a682e 100644
fe2de317
MT
34376--- a/drivers/media/dvb/dvb-core/dvbdev.c
34377+++ b/drivers/media/dvb/dvb-core/dvbdev.c
34378@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ae4e228f
MT
34379 const struct dvb_device *template, void *priv, int type)
34380 {
34381 struct dvb_device *dvbdev;
16454cff 34382- struct file_operations *dvbdevfops;
15a11c5b 34383+ file_operations_no_const *dvbdevfops;
ae4e228f
MT
34384 struct device *clsdev;
34385 int minor;
16454cff 34386 int id;
fe2de317 34387diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
5e856224 34388index 3940bb0..fb3952a 100644
fe2de317
MT
34389--- a/drivers/media/dvb/dvb-usb/cxusb.c
34390+++ b/drivers/media/dvb/dvb-usb/cxusb.c
5e856224
MT
34391@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
34392
15a11c5b 34393 struct dib0700_adapter_state {
5e856224 34394 int (*set_param_save) (struct dvb_frontend *);
15a11c5b
MT
34395-};
34396+} __no_const;
34397
5e856224
MT
34398 static int dib7070_set_param_override(struct dvb_frontend *fe)
34399 {
fe2de317 34400diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
5e856224 34401index 451c5a7..649f711 100644
fe2de317
MT
34402--- a/drivers/media/dvb/dvb-usb/dw2102.c
34403+++ b/drivers/media/dvb/dvb-usb/dw2102.c
15a11c5b
MT
34404@@ -95,7 +95,7 @@ struct su3000_state {
34405
34406 struct s6x0_state {
34407 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
34408-};
34409+} __no_const;
34410
34411 /* debug */
34412 static int dvb_usb_dw2102_debug;
fe2de317 34413diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
4c928ab7 34414index 404f63a..4796533 100644
fe2de317
MT
34415--- a/drivers/media/dvb/frontends/dib3000.h
34416+++ b/drivers/media/dvb/frontends/dib3000.h
6e9df6a3
MT
34417@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
34418 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
15a11c5b
MT
34419 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
34420 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
6e9df6a3
MT
34421-};
34422+} __no_const;
15a11c5b
MT
34423
34424 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
34425 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
fe2de317 34426diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
5e856224 34427index 8418c02..8555013 100644
fe2de317
MT
34428--- a/drivers/media/dvb/ngene/ngene-cards.c
34429+++ b/drivers/media/dvb/ngene/ngene-cards.c
34430@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
6e9df6a3
MT
34431
34432 /****************************************************************************/
34433
34434-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
34435+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
34436 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
34437 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
34438 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
fe2de317
MT
34439diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
34440index 16a089f..ab1667d 100644
34441--- a/drivers/media/radio/radio-cadet.c
34442+++ b/drivers/media/radio/radio-cadet.c
34443@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
6e9df6a3
MT
34444 unsigned char readbuf[RDS_BUFFER];
34445 int i = 0;
34446
34447+ if (count > RDS_BUFFER)
34448+ return -EFAULT;
34449 mutex_lock(&dev->lock);
34450 if (dev->rdsstat == 0) {
34451 dev->rdsstat = 1;
fe2de317
MT
34452diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
34453index 9cde353..8c6a1c3 100644
34454--- a/drivers/media/video/au0828/au0828.h
34455+++ b/drivers/media/video/au0828/au0828.h
6e9df6a3
MT
34456@@ -191,7 +191,7 @@ struct au0828_dev {
34457
34458 /* I2C */
34459 struct i2c_adapter i2c_adap;
34460- struct i2c_algorithm i2c_algo;
34461+ i2c_algorithm_no_const i2c_algo;
34462 struct i2c_client i2c_client;
34463 u32 i2c_rc;
34464
5e856224
MT
34465diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
34466index ee91e295..04ad048 100644
34467--- a/drivers/media/video/cpia2/cpia2_core.c
34468+++ b/drivers/media/video/cpia2/cpia2_core.c
34469@@ -86,6 +86,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
34470 return ret;
34471 }
34472
34473+static void *rvmalloc(unsigned long size) __size_overflow(1);
34474 static void *rvmalloc(unsigned long size)
34475 {
34476 void *mem;
34477diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
34478index 82d195b..181103c 100644
34479--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
34480+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
34481@@ -229,6 +229,8 @@ static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
34482
34483
34484 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34485+ size_t size) __size_overflow(2);
34486+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34487 size_t size)
34488 {
34489 struct snd_pcm_runtime *runtime = subs->runtime;
34490diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
34491index a2c2b7d..8f1bec7 100644
34492--- a/drivers/media/video/cx231xx/cx231xx-audio.c
34493+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
34494@@ -389,6 +389,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
34495 }
34496
34497 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34498+ size_t size) __size_overflow(2);
34499+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34500 size_t size)
34501 {
34502 struct snd_pcm_runtime *runtime = subs->runtime;
fe2de317 34503diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
5e856224 34504index 04bf662..e0ac026 100644
fe2de317
MT
34505--- a/drivers/media/video/cx88/cx88-alsa.c
34506+++ b/drivers/media/video/cx88/cx88-alsa.c
34507@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
6e9df6a3
MT
34508 * Only boards with eeprom and byte 1 at eeprom=1 have it
34509 */
34510
34511-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34512+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34513 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34514 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34515 {0, }
5e856224
MT
34516diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
34517index e2a7b77..753d0ee 100644
34518--- a/drivers/media/video/em28xx/em28xx-audio.c
34519+++ b/drivers/media/video/em28xx/em28xx-audio.c
34520@@ -225,6 +225,8 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
34521 }
34522
34523 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34524+ size_t size) __size_overflow(2);
34525+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34526 size_t size)
34527 {
34528 struct snd_pcm_runtime *runtime = subs->runtime;
34529diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
34530index b09a3c8..6dcba0a 100644
34531--- a/drivers/media/video/meye.c
34532+++ b/drivers/media/video/meye.c
34533@@ -72,6 +72,7 @@ static struct meye meye;
34534 /****************************************************************************/
34535 /* Memory allocation routines (stolen from bttv-driver.c) */
34536 /****************************************************************************/
34537+static void *rvmalloc(unsigned long size) __size_overflow(1);
34538 static void *rvmalloc(unsigned long size)
34539 {
34540 void *mem;
4c928ab7 34541diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
5e856224 34542index 1fb7d5b..3901e77 100644
4c928ab7
MT
34543--- a/drivers/media/video/omap/omap_vout.c
34544+++ b/drivers/media/video/omap/omap_vout.c
34545@@ -64,7 +64,6 @@ enum omap_vout_channels {
34546 OMAP_VIDEO2,
34547 };
66a7e928 34548
4c928ab7
MT
34549-static struct videobuf_queue_ops video_vbq_ops;
34550 /* Variables configurable through module params*/
34551 static u32 video1_numbuffers = 3;
34552 static u32 video2_numbuffers = 3;
5e856224 34553@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
4c928ab7
MT
34554 {
34555 struct videobuf_queue *q;
34556 struct omap_vout_device *vout = NULL;
34557+ static struct videobuf_queue_ops video_vbq_ops = {
34558+ .buf_setup = omap_vout_buffer_setup,
34559+ .buf_prepare = omap_vout_buffer_prepare,
34560+ .buf_release = omap_vout_buffer_release,
34561+ .buf_queue = omap_vout_buffer_queue,
34562+ };
34563
34564 vout = video_drvdata(file);
34565 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
5e856224 34566@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
4c928ab7 34567 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
66a7e928 34568
4c928ab7
MT
34569 q = &vout->vbq;
34570- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34571- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34572- video_vbq_ops.buf_release = omap_vout_buffer_release;
34573- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34574 spin_lock_init(&vout->vbq_lock);
34575
34576 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
fe2de317
MT
34577diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34578index 305e6aa..0143317 100644
34579--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34580+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
6e9df6a3
MT
34581@@ -196,7 +196,7 @@ struct pvr2_hdw {
34582
34583 /* I2C stuff */
34584 struct i2c_adapter i2c_adap;
34585- struct i2c_algorithm i2c_algo;
34586+ i2c_algorithm_no_const i2c_algo;
34587 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34588 int i2c_cx25840_hack_state;
34589 int i2c_linked;
4c928ab7
MT
34590diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
34591index 2fd38a0..ddec3c4 100644
34592--- a/drivers/media/video/saa7164/saa7164-encoder.c
34593+++ b/drivers/media/video/saa7164/saa7164-encoder.c
34594@@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
34595 }
66a7e928 34596
4c928ab7
MT
34597 static ssize_t fops_read(struct file *file, char __user *buffer,
34598+ size_t count, loff_t *pos) __size_overflow(3);
34599+static ssize_t fops_read(struct file *file, char __user *buffer,
34600 size_t count, loff_t *pos)
34601 {
34602 struct saa7164_encoder_fh *fh = file->private_data;
34603diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
34604index e2e0341..b80056c 100644
34605--- a/drivers/media/video/saa7164/saa7164-vbi.c
34606+++ b/drivers/media/video/saa7164/saa7164-vbi.c
34607@@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
34608 }
66a7e928 34609
4c928ab7
MT
34610 static ssize_t fops_read(struct file *file, char __user *buffer,
34611+ size_t count, loff_t *pos) __size_overflow(3);
34612+static ssize_t fops_read(struct file *file, char __user *buffer,
34613 size_t count, loff_t *pos)
34614 {
34615 struct saa7164_vbi_fh *fh = file->private_data;
fe2de317 34616diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
5e856224 34617index 4ed1c7c2..8f15e13 100644
fe2de317
MT
34618--- a/drivers/media/video/timblogiw.c
34619+++ b/drivers/media/video/timblogiw.c
4c928ab7 34620@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
15a11c5b
MT
34621
34622 /* Platform device functions */
34623
34624-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34625+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34626 .vidioc_querycap = timblogiw_querycap,
34627 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34628 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
4c928ab7 34629@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
6e9df6a3
MT
34630 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34631 };
34632
34633-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34634+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34635 .owner = THIS_MODULE,
34636 .open = timblogiw_open,
34637 .release = timblogiw_close,
5e856224
MT
34638diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
34639index c969111..a7910f4 100644
34640--- a/drivers/media/video/videobuf-dma-contig.c
34641+++ b/drivers/media/video/videobuf-dma-contig.c
34642@@ -184,6 +184,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
34643 return ret;
34644 }
34645
34646+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34647 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34648 {
34649 struct videobuf_dma_contig_memory *mem;
34650diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
34651index f300dea..5fc9c4a 100644
34652--- a/drivers/media/video/videobuf-dma-sg.c
34653+++ b/drivers/media/video/videobuf-dma-sg.c
34654@@ -419,6 +419,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34655 struct videobuf_dma_sg_memory
34656 */
34657
34658+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34659 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34660 {
34661 struct videobuf_dma_sg_memory *mem;
34662diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
34663index df14258..12cc7a3 100644
34664--- a/drivers/media/video/videobuf-vmalloc.c
34665+++ b/drivers/media/video/videobuf-vmalloc.c
34666@@ -135,6 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34667 struct videobuf_dma_sg_memory
34668 */
34669
34670+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34671 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34672 {
34673 struct videobuf_vmalloc_memory *mem;
fe2de317 34674diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
5e856224 34675index a7dc467..a55c423 100644
fe2de317
MT
34676--- a/drivers/message/fusion/mptbase.c
34677+++ b/drivers/message/fusion/mptbase.c
5e856224 34678@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
6892158b
MT
34679 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34680 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
57199397
MT
34681
34682+#ifdef CONFIG_GRKERNSEC_HIDESYM
6892158b 34683+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
57199397 34684+#else
6892158b 34685 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
57199397
MT
34686 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34687+#endif
34688+
34689 /*
34690 * Rounding UP to nearest 4-kB boundary here...
34691 */
fe2de317 34692diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
5e856224 34693index 551262e..7551198 100644
fe2de317
MT
34694--- a/drivers/message/fusion/mptsas.c
34695+++ b/drivers/message/fusion/mptsas.c
4c928ab7 34696@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
df50ba0c
MT
34697 return 0;
34698 }
34699
34700+static inline void
34701+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34702+{
34703+ if (phy_info->port_details) {
34704+ phy_info->port_details->rphy = rphy;
34705+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34706+ ioc->name, rphy));
34707+ }
34708+
34709+ if (rphy) {
34710+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34711+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34712+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34713+ ioc->name, rphy, rphy->dev.release));
34714+ }
34715+}
34716+
34717 /* no mutex */
34718 static void
34719 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
4c928ab7 34720@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
df50ba0c
MT
34721 return NULL;
34722 }
34723
34724-static inline void
34725-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34726-{
34727- if (phy_info->port_details) {
34728- phy_info->port_details->rphy = rphy;
34729- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34730- ioc->name, rphy));
34731- }
34732-
34733- if (rphy) {
34734- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34735- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34736- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34737- ioc->name, rphy, rphy->dev.release));
34738- }
34739-}
34740-
34741 static inline struct sas_port *
34742 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34743 {
fe2de317 34744diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
4c928ab7 34745index 0c3ced7..1fe34ec 100644
fe2de317
MT
34746--- a/drivers/message/fusion/mptscsih.c
34747+++ b/drivers/message/fusion/mptscsih.c
4c928ab7 34748@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
6892158b
MT
34749
34750 h = shost_priv(SChost);
34751
34752- if (h) {
34753- if (h->info_kbuf == NULL)
34754- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34755- return h->info_kbuf;
34756- h->info_kbuf[0] = '\0';
34757+ if (!h)
34758+ return NULL;
34759
34760- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34761- h->info_kbuf[size-1] = '\0';
34762- }
34763+ if (h->info_kbuf == NULL)
34764+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34765+ return h->info_kbuf;
34766+ h->info_kbuf[0] = '\0';
34767+
34768+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34769+ h->info_kbuf[size-1] = '\0';
34770
34771 return h->info_kbuf;
34772 }
fe2de317 34773diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
5e856224 34774index 6d115c7..58ff7fd 100644
fe2de317
MT
34775--- a/drivers/message/i2o/i2o_proc.c
34776+++ b/drivers/message/i2o/i2o_proc.c
df50ba0c 34777@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
58c5fc13
MT
34778 "Array Controller Device"
34779 };
34780
34781-static char *chtostr(u8 * chars, int n)
34782-{
34783- char tmp[256];
34784- tmp[0] = 0;
34785- return strncat(tmp, (char *)chars, n);
34786-}
34787-
34788 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34789 char *group)
34790 {
fe2de317 34791@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
58c5fc13
MT
34792
34793 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34794 seq_printf(seq, "%-#8x", ddm_table.module_id);
34795- seq_printf(seq, "%-29s",
34796- chtostr(ddm_table.module_name_version, 28));
34797+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34798 seq_printf(seq, "%9d ", ddm_table.data_size);
34799 seq_printf(seq, "%8d", ddm_table.code_size);
34800
fe2de317 34801@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
58c5fc13
MT
34802
34803 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34804 seq_printf(seq, "%-#8x", dst->module_id);
34805- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34806- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34807+ seq_printf(seq, "%-.28s", dst->module_name_version);
34808+ seq_printf(seq, "%-.8s", dst->date);
34809 seq_printf(seq, "%8d ", dst->module_size);
34810 seq_printf(seq, "%8d ", dst->mpb_size);
34811 seq_printf(seq, "0x%04x", dst->module_flags);
fe2de317 34812@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
58c5fc13
MT
34813 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34814 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34815 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34816- seq_printf(seq, "Vendor info : %s\n",
34817- chtostr((u8 *) (work32 + 2), 16));
34818- seq_printf(seq, "Product info : %s\n",
34819- chtostr((u8 *) (work32 + 6), 16));
34820- seq_printf(seq, "Description : %s\n",
34821- chtostr((u8 *) (work32 + 10), 16));
34822- seq_printf(seq, "Product rev. : %s\n",
34823- chtostr((u8 *) (work32 + 14), 8));
34824+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34825+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34826+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34827+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34828
34829 seq_printf(seq, "Serial number : ");
34830 print_serial_number(seq, (u8 *) (work32 + 16),
fe2de317 34831@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
58c5fc13
MT
34832 }
34833
34834 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34835- seq_printf(seq, "Module name : %s\n",
34836- chtostr(result.module_name, 24));
34837- seq_printf(seq, "Module revision : %s\n",
34838- chtostr(result.module_rev, 8));
34839+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34840+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34841
34842 seq_printf(seq, "Serial number : ");
34843 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
fe2de317 34844@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
58c5fc13
MT
34845 return 0;
34846 }
34847
34848- seq_printf(seq, "Device name : %s\n",
34849- chtostr(result.device_name, 64));
34850- seq_printf(seq, "Service name : %s\n",
34851- chtostr(result.service_name, 64));
34852- seq_printf(seq, "Physical name : %s\n",
34853- chtostr(result.physical_location, 64));
34854- seq_printf(seq, "Instance number : %s\n",
34855- chtostr(result.instance_number, 4));
34856+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34857+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34858+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34859+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34860
34861 return 0;
34862 }
fe2de317
MT
34863diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34864index a8c08f3..155fe3d 100644
34865--- a/drivers/message/i2o/iop.c
34866+++ b/drivers/message/i2o/iop.c
34867@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
8308f9c9
MT
34868
34869 spin_lock_irqsave(&c->context_list_lock, flags);
34870
34871- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34872- atomic_inc(&c->context_list_counter);
34873+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34874+ atomic_inc_unchecked(&c->context_list_counter);
34875
34876- entry->context = atomic_read(&c->context_list_counter);
34877+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34878
34879 list_add(&entry->list, &c->context_list);
34880
fe2de317 34881@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
8308f9c9
MT
34882
34883 #if BITS_PER_LONG == 64
34884 spin_lock_init(&c->context_list_lock);
34885- atomic_set(&c->context_list_counter, 0);
34886+ atomic_set_unchecked(&c->context_list_counter, 0);
34887 INIT_LIST_HEAD(&c->context_list);
34888 #endif
34889
fe2de317 34890diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
4c928ab7 34891index 7ce65f4..e66e9bc 100644
fe2de317
MT
34892--- a/drivers/mfd/abx500-core.c
34893+++ b/drivers/mfd/abx500-core.c
4c928ab7 34894@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
66a7e928 34895
15a11c5b
MT
34896 struct abx500_device_entry {
34897 struct list_head list;
34898- struct abx500_ops ops;
34899+ abx500_ops_no_const ops;
66a7e928
MT
34900 struct device *dev;
34901 };
34902
fe2de317 34903diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
5e856224 34904index a9223ed..4127b13 100644
fe2de317
MT
34905--- a/drivers/mfd/janz-cmodio.c
34906+++ b/drivers/mfd/janz-cmodio.c
57199397
MT
34907@@ -13,6 +13,7 @@
34908
34909 #include <linux/kernel.h>
34910 #include <linux/module.h>
34911+#include <linux/slab.h>
34912 #include <linux/init.h>
34913 #include <linux/pci.h>
34914 #include <linux/interrupt.h>
fe2de317 34915diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
5e856224 34916index a981e2a..5ca0c8b 100644
fe2de317
MT
34917--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34918+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
5e856224 34919@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
66a7e928
MT
34920 * the lid is closed. This leads to interrupts as soon as a little move
34921 * is done.
34922 */
4c928ab7
MT
34923- atomic_inc(&lis3->count);
34924+ atomic_inc_unchecked(&lis3->count);
66a7e928 34925
4c928ab7
MT
34926 wake_up_interruptible(&lis3->misc_wait);
34927 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
5e856224 34928@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
4c928ab7
MT
34929 if (lis3->pm_dev)
34930 pm_runtime_get_sync(lis3->pm_dev);
66a7e928 34931
4c928ab7
MT
34932- atomic_set(&lis3->count, 0);
34933+ atomic_set_unchecked(&lis3->count, 0);
66a7e928
MT
34934 return 0;
34935 }
34936
5e856224 34937@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
4c928ab7 34938 add_wait_queue(&lis3->misc_wait, &wait);
66a7e928
MT
34939 while (true) {
34940 set_current_state(TASK_INTERRUPTIBLE);
4c928ab7
MT
34941- data = atomic_xchg(&lis3->count, 0);
34942+ data = atomic_xchg_unchecked(&lis3->count, 0);
66a7e928
MT
34943 if (data)
34944 break;
34945
5e856224 34946@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
4c928ab7
MT
34947 struct lis3lv02d, miscdev);
34948
34949 poll_wait(file, &lis3->misc_wait, wait);
34950- if (atomic_read(&lis3->count))
34951+ if (atomic_read_unchecked(&lis3->count))
66a7e928
MT
34952 return POLLIN | POLLRDNORM;
34953 return 0;
34954 }
fe2de317 34955diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
4c928ab7 34956index 2b1482a..5d33616 100644
fe2de317
MT
34957--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34958+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
4c928ab7 34959@@ -266,7 +266,7 @@ struct lis3lv02d {
66a7e928
MT
34960 struct input_polled_dev *idev; /* input device */
34961 struct platform_device *pdev; /* platform device */
34962 struct regulator_bulk_data regulators[2];
34963- atomic_t count; /* interrupt count after last read */
34964+ atomic_unchecked_t count; /* interrupt count after last read */
34965 union axis_conversion ac; /* hw -> logical axis */
34966 int mapped_btns[3];
34967
fe2de317
MT
34968diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34969index 2f30bad..c4c13d0 100644
34970--- a/drivers/misc/sgi-gru/gruhandles.c
34971+++ b/drivers/misc/sgi-gru/gruhandles.c
34972@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
ae4e228f
MT
34973 unsigned long nsec;
34974
34975 nsec = CLKS2NSEC(clks);
34976- atomic_long_inc(&mcs_op_statistics[op].count);
34977- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34978+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34979+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34980 if (mcs_op_statistics[op].max < nsec)
34981 mcs_op_statistics[op].max = nsec;
34982 }
fe2de317 34983diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
5e856224 34984index 950dbe9..eeef0f8 100644
fe2de317
MT
34985--- a/drivers/misc/sgi-gru/gruprocfs.c
34986+++ b/drivers/misc/sgi-gru/gruprocfs.c
ae4e228f
MT
34987@@ -32,9 +32,9 @@
34988
34989 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34990
34991-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34992+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34993 {
34994- unsigned long val = atomic_long_read(v);
34995+ unsigned long val = atomic_long_read_unchecked(v);
34996
34997 seq_printf(s, "%16lu %s\n", val, id);
34998 }
fe2de317 34999@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
ae4e228f
MT
35000
35001 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
35002 for (op = 0; op < mcsop_last; op++) {
35003- count = atomic_long_read(&mcs_op_statistics[op].count);
35004- total = atomic_long_read(&mcs_op_statistics[op].total);
35005+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
35006+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
35007 max = mcs_op_statistics[op].max;
35008 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
35009 count ? total / count : 0, max);
fe2de317
MT
35010diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
35011index 5c3ce24..4915ccb 100644
35012--- a/drivers/misc/sgi-gru/grutables.h
35013+++ b/drivers/misc/sgi-gru/grutables.h
ae4e228f
MT
35014@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
35015 * GRU statistics.
35016 */
35017 struct gru_stats_s {
35018- atomic_long_t vdata_alloc;
35019- atomic_long_t vdata_free;
35020- atomic_long_t gts_alloc;
35021- atomic_long_t gts_free;
35022- atomic_long_t gms_alloc;
35023- atomic_long_t gms_free;
35024- atomic_long_t gts_double_allocate;
35025- atomic_long_t assign_context;
35026- atomic_long_t assign_context_failed;
35027- atomic_long_t free_context;
35028- atomic_long_t load_user_context;
35029- atomic_long_t load_kernel_context;
35030- atomic_long_t lock_kernel_context;
35031- atomic_long_t unlock_kernel_context;
35032- atomic_long_t steal_user_context;
35033- atomic_long_t steal_kernel_context;
35034- atomic_long_t steal_context_failed;
35035- atomic_long_t nopfn;
35036- atomic_long_t asid_new;
35037- atomic_long_t asid_next;
35038- atomic_long_t asid_wrap;
35039- atomic_long_t asid_reuse;
35040- atomic_long_t intr;
35041- atomic_long_t intr_cbr;
35042- atomic_long_t intr_tfh;
35043- atomic_long_t intr_spurious;
35044- atomic_long_t intr_mm_lock_failed;
35045- atomic_long_t call_os;
35046- atomic_long_t call_os_wait_queue;
35047- atomic_long_t user_flush_tlb;
35048- atomic_long_t user_unload_context;
35049- atomic_long_t user_exception;
35050- atomic_long_t set_context_option;
35051- atomic_long_t check_context_retarget_intr;
35052- atomic_long_t check_context_unload;
35053- atomic_long_t tlb_dropin;
35054- atomic_long_t tlb_preload_page;
35055- atomic_long_t tlb_dropin_fail_no_asid;
35056- atomic_long_t tlb_dropin_fail_upm;
35057- atomic_long_t tlb_dropin_fail_invalid;
35058- atomic_long_t tlb_dropin_fail_range_active;
35059- atomic_long_t tlb_dropin_fail_idle;
35060- atomic_long_t tlb_dropin_fail_fmm;
35061- atomic_long_t tlb_dropin_fail_no_exception;
35062- atomic_long_t tfh_stale_on_fault;
35063- atomic_long_t mmu_invalidate_range;
35064- atomic_long_t mmu_invalidate_page;
35065- atomic_long_t flush_tlb;
35066- atomic_long_t flush_tlb_gru;
35067- atomic_long_t flush_tlb_gru_tgh;
35068- atomic_long_t flush_tlb_gru_zero_asid;
35069+ atomic_long_unchecked_t vdata_alloc;
35070+ atomic_long_unchecked_t vdata_free;
35071+ atomic_long_unchecked_t gts_alloc;
35072+ atomic_long_unchecked_t gts_free;
35073+ atomic_long_unchecked_t gms_alloc;
35074+ atomic_long_unchecked_t gms_free;
35075+ atomic_long_unchecked_t gts_double_allocate;
35076+ atomic_long_unchecked_t assign_context;
35077+ atomic_long_unchecked_t assign_context_failed;
35078+ atomic_long_unchecked_t free_context;
35079+ atomic_long_unchecked_t load_user_context;
35080+ atomic_long_unchecked_t load_kernel_context;
35081+ atomic_long_unchecked_t lock_kernel_context;
35082+ atomic_long_unchecked_t unlock_kernel_context;
35083+ atomic_long_unchecked_t steal_user_context;
35084+ atomic_long_unchecked_t steal_kernel_context;
35085+ atomic_long_unchecked_t steal_context_failed;
35086+ atomic_long_unchecked_t nopfn;
35087+ atomic_long_unchecked_t asid_new;
35088+ atomic_long_unchecked_t asid_next;
35089+ atomic_long_unchecked_t asid_wrap;
35090+ atomic_long_unchecked_t asid_reuse;
35091+ atomic_long_unchecked_t intr;
35092+ atomic_long_unchecked_t intr_cbr;
35093+ atomic_long_unchecked_t intr_tfh;
35094+ atomic_long_unchecked_t intr_spurious;
35095+ atomic_long_unchecked_t intr_mm_lock_failed;
35096+ atomic_long_unchecked_t call_os;
35097+ atomic_long_unchecked_t call_os_wait_queue;
35098+ atomic_long_unchecked_t user_flush_tlb;
35099+ atomic_long_unchecked_t user_unload_context;
35100+ atomic_long_unchecked_t user_exception;
35101+ atomic_long_unchecked_t set_context_option;
35102+ atomic_long_unchecked_t check_context_retarget_intr;
35103+ atomic_long_unchecked_t check_context_unload;
35104+ atomic_long_unchecked_t tlb_dropin;
35105+ atomic_long_unchecked_t tlb_preload_page;
35106+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
35107+ atomic_long_unchecked_t tlb_dropin_fail_upm;
35108+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
35109+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
35110+ atomic_long_unchecked_t tlb_dropin_fail_idle;
35111+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
35112+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
35113+ atomic_long_unchecked_t tfh_stale_on_fault;
35114+ atomic_long_unchecked_t mmu_invalidate_range;
35115+ atomic_long_unchecked_t mmu_invalidate_page;
35116+ atomic_long_unchecked_t flush_tlb;
35117+ atomic_long_unchecked_t flush_tlb_gru;
35118+ atomic_long_unchecked_t flush_tlb_gru_tgh;
35119+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
35120
35121- atomic_long_t copy_gpa;
35122- atomic_long_t read_gpa;
35123+ atomic_long_unchecked_t copy_gpa;
35124+ atomic_long_unchecked_t read_gpa;
35125
35126- atomic_long_t mesq_receive;
35127- atomic_long_t mesq_receive_none;
35128- atomic_long_t mesq_send;
35129- atomic_long_t mesq_send_failed;
35130- atomic_long_t mesq_noop;
35131- atomic_long_t mesq_send_unexpected_error;
35132- atomic_long_t mesq_send_lb_overflow;
35133- atomic_long_t mesq_send_qlimit_reached;
35134- atomic_long_t mesq_send_amo_nacked;
35135- atomic_long_t mesq_send_put_nacked;
35136- atomic_long_t mesq_page_overflow;
35137- atomic_long_t mesq_qf_locked;
35138- atomic_long_t mesq_qf_noop_not_full;
35139- atomic_long_t mesq_qf_switch_head_failed;
35140- atomic_long_t mesq_qf_unexpected_error;
35141- atomic_long_t mesq_noop_unexpected_error;
35142- atomic_long_t mesq_noop_lb_overflow;
35143- atomic_long_t mesq_noop_qlimit_reached;
35144- atomic_long_t mesq_noop_amo_nacked;
35145- atomic_long_t mesq_noop_put_nacked;
35146- atomic_long_t mesq_noop_page_overflow;
35147+ atomic_long_unchecked_t mesq_receive;
35148+ atomic_long_unchecked_t mesq_receive_none;
35149+ atomic_long_unchecked_t mesq_send;
35150+ atomic_long_unchecked_t mesq_send_failed;
35151+ atomic_long_unchecked_t mesq_noop;
35152+ atomic_long_unchecked_t mesq_send_unexpected_error;
35153+ atomic_long_unchecked_t mesq_send_lb_overflow;
35154+ atomic_long_unchecked_t mesq_send_qlimit_reached;
35155+ atomic_long_unchecked_t mesq_send_amo_nacked;
35156+ atomic_long_unchecked_t mesq_send_put_nacked;
35157+ atomic_long_unchecked_t mesq_page_overflow;
35158+ atomic_long_unchecked_t mesq_qf_locked;
35159+ atomic_long_unchecked_t mesq_qf_noop_not_full;
35160+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
35161+ atomic_long_unchecked_t mesq_qf_unexpected_error;
35162+ atomic_long_unchecked_t mesq_noop_unexpected_error;
35163+ atomic_long_unchecked_t mesq_noop_lb_overflow;
35164+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
35165+ atomic_long_unchecked_t mesq_noop_amo_nacked;
35166+ atomic_long_unchecked_t mesq_noop_put_nacked;
35167+ atomic_long_unchecked_t mesq_noop_page_overflow;
58c5fc13 35168
58c5fc13 35169 };
58c5fc13 35170
fe2de317 35171@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
ae4e228f 35172 tghop_invalidate, mcsop_last};
58c5fc13 35173
35174 struct mcs_op_statistic {
35175- atomic_long_t count;
35176- atomic_long_t total;
35177+ atomic_long_unchecked_t count;
35178+ atomic_long_unchecked_t total;
35179 unsigned long max;
35180 };
35181
fe2de317 35182@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
58c5fc13 35183
35184 #define STAT(id) do { \
35185 if (gru_options & OPT_STATS) \
35186- atomic_long_inc(&gru_stats.id); \
35187+ atomic_long_inc_unchecked(&gru_stats.id); \
35188 } while (0)
58c5fc13 35189
ae4e228f 35190 #ifdef CONFIG_SGI_GRU_DEBUG
35191diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
35192index 851b2f2..a4ec097 100644
35193--- a/drivers/misc/sgi-xp/xp.h
35194+++ b/drivers/misc/sgi-xp/xp.h
35195@@ -289,7 +289,7 @@ struct xpc_interface {
35196 xpc_notify_func, void *);
35197 void (*received) (short, int, void *);
35198 enum xp_retval (*partid_to_nasids) (short, void *);
35199-};
35200+} __no_const;
35201
35202 extern struct xpc_interface xpc_interface;
35203
35204diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
35205index b94d5f7..7f494c5 100644
35206--- a/drivers/misc/sgi-xp/xpc.h
35207+++ b/drivers/misc/sgi-xp/xpc.h
35208@@ -835,6 +835,7 @@ struct xpc_arch_operations {
35209 void (*received_payload) (struct xpc_channel *, void *);
35210 void (*notify_senders_of_disconnect) (struct xpc_channel *);
35211 };
35212+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
35213
35214 /* struct xpc_partition act_state values (for XPC HB) */
35215
fe2de317 35216@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
35217 /* found in xpc_main.c */
35218 extern struct device *xpc_part;
35219 extern struct device *xpc_chan;
35220-extern struct xpc_arch_operations xpc_arch_ops;
35221+extern xpc_arch_operations_no_const xpc_arch_ops;
35222 extern int xpc_disengage_timelimit;
35223 extern int xpc_disengage_timedout;
35224 extern int xpc_activate_IRQ_rcvd;
35225diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
35226index 8d082b4..aa749ae 100644
35227--- a/drivers/misc/sgi-xp/xpc_main.c
35228+++ b/drivers/misc/sgi-xp/xpc_main.c
35229@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
35230 .notifier_call = xpc_system_die,
35231 };
35232
35233-struct xpc_arch_operations xpc_arch_ops;
35234+xpc_arch_operations_no_const xpc_arch_ops;
35235
35236 /*
35237 * Timer function to enforce the timelimit on the partition disengage.
fe2de317 35238diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
5e856224 35239index 6ebdc40..9edf5d8 100644
35240--- a/drivers/mmc/host/sdhci-pci.c
35241+++ b/drivers/mmc/host/sdhci-pci.c
5e856224 35242@@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
35243 .probe = via_probe,
35244 };
35245
35246-static const struct pci_device_id pci_ids[] __devinitdata = {
35247+static const struct pci_device_id pci_ids[] __devinitconst = {
35248 {
35249 .vendor = PCI_VENDOR_ID_RICOH,
35250 .device = PCI_DEVICE_ID_RICOH_R5C822,
fe2de317 35251diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
5e856224 35252index 87a431c..4959b43 100644
35253--- a/drivers/mtd/devices/doc2000.c
35254+++ b/drivers/mtd/devices/doc2000.c
5e856224 35255@@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
35256
35257 /* The ECC will not be calculated correctly if less than 512 is written */
35258 /* DBB-
35259- if (len != 0x200 && eccbuf)
35260+ if (len != 0x200)
35261 printk(KERN_WARNING
35262 "ECC needs a full sector write (adr: %lx size %lx)\n",
35263 (long) to, (long) len);
fe2de317 35264diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
5e856224 35265index 9eacf67..4534b5b 100644
35266--- a/drivers/mtd/devices/doc2001.c
35267+++ b/drivers/mtd/devices/doc2001.c
5e856224 35268@@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
35269 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
35270
58c5fc13 35271 /* Don't allow read past end of device */
35272- if (from >= this->totlen)
35273+ if (from >= this->totlen || !len)
58c5fc13 35274 return -EINVAL;
35275
35276 /* Don't allow a single read to cross a 512-byte block boundary */
fe2de317 35277diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
4c928ab7 35278index 3984d48..28aa897 100644
35279--- a/drivers/mtd/nand/denali.c
35280+++ b/drivers/mtd/nand/denali.c
15a11c5b 35281@@ -26,6 +26,7 @@
35282 #include <linux/pci.h>
35283 #include <linux/mtd/mtd.h>
35284 #include <linux/module.h>
35285+#include <linux/slab.h>
35286
35287 #include "denali.h"
35288
fe2de317 35289diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
5e856224 35290index 51b9d6a..52af9a7 100644
35291--- a/drivers/mtd/nftlmount.c
35292+++ b/drivers/mtd/nftlmount.c
35293@@ -24,6 +24,7 @@
35294 #include <asm/errno.h>
35295 #include <linux/delay.h>
35296 #include <linux/slab.h>
35297+#include <linux/sched.h>
35298 #include <linux/mtd/mtd.h>
35299 #include <linux/mtd/nand.h>
35300 #include <linux/mtd/nftl.h>
4c928ab7 35301diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
5e856224 35302index e2cdebf..d48183a 100644
35303--- a/drivers/mtd/ubi/debug.c
35304+++ b/drivers/mtd/ubi/debug.c
35305@@ -338,6 +338,8 @@ out:
35306
35307 /* Write an UBI debugfs file */
35308 static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
35309+ size_t count, loff_t *ppos) __size_overflow(3);
35310+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
35311 size_t count, loff_t *ppos)
35312 {
35313 unsigned long ubi_num = (unsigned long)file->private_data;
35314diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
5e856224 35315index 071f4c8..440862e 100644
35316--- a/drivers/net/ethernet/atheros/atlx/atl2.c
35317+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
5e856224 35318@@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
35319 */
35320
35321 #define ATL2_PARAM(X, desc) \
35322- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
35323+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
35324 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
35325 MODULE_PARM_DESC(X, desc);
35326 #else
4c928ab7 35327diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
5e856224 35328index 66da39f..5dc436d 100644
35329--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35330+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
5e856224 35331@@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
35332
35333 int (*wait_comp)(struct bnx2x *bp,
35334 struct bnx2x_rx_mode_ramrod_params *p);
35335-};
35336+} __no_const;
35337
35338 /********************** Set multicast group ***********************************/
35339
4c928ab7 35340diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
5e856224 35341index aea8f72..fcebf75 100644
35342--- a/drivers/net/ethernet/broadcom/tg3.h
35343+++ b/drivers/net/ethernet/broadcom/tg3.h
5e856224 35344@@ -140,6 +140,7 @@
35345 #define CHIPREV_ID_5750_A0 0x4000
35346 #define CHIPREV_ID_5750_A1 0x4001
35347 #define CHIPREV_ID_5750_A3 0x4003
35348+#define CHIPREV_ID_5750_C1 0x4201
35349 #define CHIPREV_ID_5750_C2 0x4202
35350 #define CHIPREV_ID_5752_A0_HW 0x5000
35351 #define CHIPREV_ID_5752_A0 0x6000
35352diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
35353index 47a8435..248e4b3 100644
35354--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
35355+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
35356@@ -1052,6 +1052,8 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
35357 * be copied but there is no memory for the copy.
35358 */
35359 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
35360+ struct freelQ *fl, unsigned int len) __size_overflow(3);
35361+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
35362 struct freelQ *fl, unsigned int len)
35363 {
35364 struct sk_buff *skb;
4c928ab7 35365diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
5e856224 35366index c4e8643..0979484 100644
35367--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35368+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
fe2de317 35369@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
35370 */
35371 struct l2t_skb_cb {
35372 arp_failure_handler_func arp_failure_handler;
35373-};
35374+} __no_const;
35375
35376 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
35377
4c928ab7 35378diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
5e856224 35379index cfb60e1..94af340 100644
35380--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
35381+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
35382@@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
35383 * of the SW ring.
35384 */
35385 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35386+ size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4);
35387+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35388 size_t sw_size, dma_addr_t * phys, void *metadata)
35389 {
35390 size_t len = nelem * elem_size;
35391@@ -777,6 +779,8 @@ static inline unsigned int flits_to_desc(unsigned int n)
35392 * be copied but there is no memory for the copy.
35393 */
35394 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35395+ unsigned int len, unsigned int drop_thres) __size_overflow(3);
35396+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35397 unsigned int len, unsigned int drop_thres)
35398 {
35399 struct sk_buff *skb = NULL;
4c928ab7 35400diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
5e856224 35401index 2dae795..73037d2 100644
35402--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
35403+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
35404@@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
35405 */
35406 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35407 size_t sw_size, dma_addr_t *phys, void *metadata,
35408+ size_t stat_size, int node) __size_overflow(2,4);
35409+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35410+ size_t sw_size, dma_addr_t *phys, void *metadata,
35411 size_t stat_size, int node)
35412 {
35413 size_t len = nelem * elem_size + stat_size;
35414diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
5e856224 35415index 0bd585b..d954ca5 100644
35416--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35417+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
5e856224 35418@@ -729,6 +729,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
4c928ab7
MT
35419 */
35420 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35421 size_t swsize, dma_addr_t *busaddrp, void *swringp,
35422+ size_t stat_size) __size_overflow(2,4);
35423+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35424+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
35425 size_t stat_size)
35426 {
35427 /*
35428diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
5e856224 35429index 4d71f5a..8004440 100644
35430--- a/drivers/net/ethernet/dec/tulip/de4x5.c
35431+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
5e856224 35432@@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35433 for (i=0; i<ETH_ALEN; i++) {
35434 tmp.addr[i] = dev->dev_addr[i];
35435 }
35436- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35437+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35438 break;
66a7e928 35439
4c928ab7 35440 case DE4X5_SET_HWADDR: /* Set the hardware address */
5e856224 35441@@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35442 spin_lock_irqsave(&lp->lock, flags);
35443 memcpy(&statbuf, &lp->pktStats, ioc->len);
35444 spin_unlock_irqrestore(&lp->lock, flags);
35445- if (copy_to_user(ioc->data, &statbuf, ioc->len))
35446+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35447 return -EFAULT;
35448 break;
35449 }
35450diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
35451index 14d5b61..1398636 100644
35452--- a/drivers/net/ethernet/dec/tulip/eeprom.c
35453+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
35454@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
35455 {NULL}};
66a7e928 35456
66a7e928 35457
35458-static const char *block_name[] __devinitdata = {
35459+static const char *block_name[] __devinitconst = {
35460 "21140 non-MII",
35461 "21140 MII PHY",
35462 "21142 Serial PHY",
35463diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
5e856224 35464index 52da7b2..4ddfe1c 100644
35465--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
35466+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
35467@@ -236,7 +236,7 @@ struct pci_id_info {
35468 int drv_flags; /* Driver use, intended as capability flags. */
35469 };
35470
35471-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35472+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35473 { /* Sometime a Level-One switch card. */
35474 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
35475 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
35476diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
35477index b2dc2c8..2e09edb 100644
35478--- a/drivers/net/ethernet/dlink/dl2k.c
35479+++ b/drivers/net/ethernet/dlink/dl2k.c
35480@@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
35481 {
35482 int phy_addr;
35483 struct netdev_private *np = netdev_priv(dev);
35484- struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
35485-
35486- struct netdev_desc *desc;
35487- int i;
35488+ struct mii_ioctl_data *miidata = if_mii(rq);
35489
35490 phy_addr = np->phy_addr;
35491 switch (cmd) {
35492- case SIOCDEVPRIVATE:
35493+ case SIOCGMIIPHY:
35494+ miidata->phy_id = phy_addr;
35495 break;
35496-
35497- case SIOCDEVPRIVATE + 1:
35498- miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
35499+ case SIOCGMIIREG:
35500+ miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
35501 break;
35502- case SIOCDEVPRIVATE + 2:
35503- mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
35504+ case SIOCSMIIREG:
35505+ if (!capable(CAP_NET_ADMIN))
35506+ return -EPERM;
35507+ mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
35508 break;
35509- case SIOCDEVPRIVATE + 3:
35510- break;
35511- case SIOCDEVPRIVATE + 4:
35512- break;
35513- case SIOCDEVPRIVATE + 5:
35514- netif_stop_queue (dev);
35515- break;
35516- case SIOCDEVPRIVATE + 6:
35517- netif_wake_queue (dev);
35518- break;
35519- case SIOCDEVPRIVATE + 7:
35520- printk
35521- ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
35522- netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
35523- np->old_rx);
35524- break;
35525- case SIOCDEVPRIVATE + 8:
35526- printk("TX ring:\n");
35527- for (i = 0; i < TX_RING_SIZE; i++) {
35528- desc = &np->tx_ring[i];
35529- printk
35530- ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
35531- i,
35532- (u32) (np->tx_ring_dma + i * sizeof (*desc)),
35533- (u32)le64_to_cpu(desc->next_desc),
35534- (u32)le64_to_cpu(desc->status),
35535- (u32)(le64_to_cpu(desc->fraginfo) >> 32),
35536- (u32)le64_to_cpu(desc->fraginfo));
35537- printk ("\n");
35538- }
35539- printk ("\n");
35540- break;
35541-
35542 default:
35543 return -EOPNOTSUPP;
35544 }
35545diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
35546index ba0adca..30c2da3 100644
35547--- a/drivers/net/ethernet/dlink/dl2k.h
35548+++ b/drivers/net/ethernet/dlink/dl2k.h
35549@@ -365,13 +365,6 @@ struct ioctl_data {
35550 char *data;
35551 };
35552
35553-struct mii_data {
35554- __u16 reserved;
35555- __u16 reg_num;
35556- __u16 in_value;
35557- __u16 out_value;
35558-};
35559-
35560 /* The Rx and Tx buffer descriptors. */
35561 struct netdev_desc {
35562 __le64 next_desc;
4c928ab7 35563diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
5e856224 35564index 28a3a9b..d96cb63 100644
35565--- a/drivers/net/ethernet/dlink/sundance.c
35566+++ b/drivers/net/ethernet/dlink/sundance.c
35567@@ -218,7 +218,7 @@ enum {
35568 struct pci_id_info {
35569 const char *name;
35570 };
35571-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35572+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35573 {"D-Link DFE-550TX FAST Ethernet Adapter"},
35574 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
35575 {"D-Link DFE-580TX 4 port Server Adapter"},
35576diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
5e856224 35577index e703d64..d62ecf9 100644
35578--- a/drivers/net/ethernet/emulex/benet/be_main.c
35579+++ b/drivers/net/ethernet/emulex/benet/be_main.c
5e856224 35580@@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
35581
35582 if (wrapped)
35583 newacc += 65536;
35584- ACCESS_ONCE(*acc) = newacc;
35585+ ACCESS_ONCE_RW(*acc) = newacc;
35586 }
35587
35588 void be_parse_stats(struct be_adapter *adapter)
35589diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
5e856224 35590index 47f85c3..82ab6c4 100644
35591--- a/drivers/net/ethernet/faraday/ftgmac100.c
35592+++ b/drivers/net/ethernet/faraday/ftgmac100.c
5e856224 35593@@ -31,6 +31,8 @@
35594 #include <linux/netdevice.h>
35595 #include <linux/phy.h>
35596 #include <linux/platform_device.h>
35597+#include <linux/interrupt.h>
35598+#include <linux/irqreturn.h>
35599 #include <net/ip.h>
35600
35601 #include "ftgmac100.h"
35602diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
5e856224 35603index bb336a0..4b472da 100644
35604--- a/drivers/net/ethernet/faraday/ftmac100.c
35605+++ b/drivers/net/ethernet/faraday/ftmac100.c
5e856224 35606@@ -31,6 +31,8 @@
35607 #include <linux/module.h>
35608 #include <linux/netdevice.h>
35609 #include <linux/platform_device.h>
35610+#include <linux/interrupt.h>
35611+#include <linux/irqreturn.h>
35612
35613 #include "ftmac100.h"
35614
35615diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
5e856224 35616index c82d444..0007fb4 100644
35617--- a/drivers/net/ethernet/fealnx.c
35618+++ b/drivers/net/ethernet/fealnx.c
35619@@ -150,7 +150,7 @@ struct chip_info {
35620 int flags;
35621 };
35622
35623-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
35624+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
35625 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35626 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
35627 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35628diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35629index e1159e5..e18684d 100644
35630--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35631+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35632@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
35633 {
35634 struct e1000_hw *hw = &adapter->hw;
35635 struct e1000_mac_info *mac = &hw->mac;
16454cff 35636- struct e1000_mac_operations *func = &mac->ops;
15a11c5b 35637+ e1000_mac_operations_no_const *func = &mac->ops;
35638
35639 /* Set media type */
35640 switch (adapter->pdev->device) {
35641diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
35642index a3e65fd..f451444 100644
35643--- a/drivers/net/ethernet/intel/e1000e/82571.c
35644+++ b/drivers/net/ethernet/intel/e1000e/82571.c
35645@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
35646 {
35647 struct e1000_hw *hw = &adapter->hw;
35648 struct e1000_mac_info *mac = &hw->mac;
16454cff 35649- struct e1000_mac_operations *func = &mac->ops;
15a11c5b 35650+ e1000_mac_operations_no_const *func = &mac->ops;
35651 u32 swsm = 0;
35652 u32 swsm2 = 0;
35653 bool force_clear_smbi = false;
35654diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
fe2de317 35655index 2967039..ca8c40c 100644
35656--- a/drivers/net/ethernet/intel/e1000e/hw.h
35657+++ b/drivers/net/ethernet/intel/e1000e/hw.h
6e9df6a3 35658@@ -778,6 +778,7 @@ struct e1000_mac_operations {
35659 void (*write_vfta)(struct e1000_hw *, u32, u32);
35660 s32 (*read_mac_addr)(struct e1000_hw *);
ae4e228f 35661 };
15a11c5b 35662+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
ae4e228f 35663
35664 /*
35665 * When to use various PHY register access functions:
35666@@ -818,6 +819,7 @@ struct e1000_phy_operations {
35667 void (*power_up)(struct e1000_hw *);
35668 void (*power_down)(struct e1000_hw *);
ae4e228f 35669 };
15a11c5b 35670+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
ae4e228f 35671
35672 /* Function pointers for the NVM. */
35673 struct e1000_nvm_operations {
6e9df6a3 35674@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
35675 s32 (*validate)(struct e1000_hw *);
35676 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
ae4e228f 35677 };
15a11c5b 35678+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35679
35680 struct e1000_mac_info {
35681- struct e1000_mac_operations ops;
35682+ e1000_mac_operations_no_const ops;
35683 u8 addr[ETH_ALEN];
35684 u8 perm_addr[ETH_ALEN];
15a11c5b 35685
6e9df6a3 35686@@ -872,7 +875,7 @@ struct e1000_mac_info {
35687 };
35688
35689 struct e1000_phy_info {
35690- struct e1000_phy_operations ops;
35691+ e1000_phy_operations_no_const ops;
35692
35693 enum e1000_phy_type type;
15a11c5b 35694
6e9df6a3 35695@@ -906,7 +909,7 @@ struct e1000_phy_info {
35696 };
35697
35698 struct e1000_nvm_info {
35699- struct e1000_nvm_operations ops;
35700+ e1000_nvm_operations_no_const ops;
35701
35702 enum e1000_nvm_type type;
15a11c5b 35703 enum e1000_nvm_override override;
4c928ab7 35704diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
5e856224 35705index f67cbd3..cef9e3d 100644
35706--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
35707+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
35708@@ -314,6 +314,7 @@ struct e1000_mac_operations {
35709 s32 (*read_mac_addr)(struct e1000_hw *);
35710 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
66a7e928 35711 };
15a11c5b 35712+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
66a7e928 35713
35714 struct e1000_phy_operations {
35715 s32 (*acquire)(struct e1000_hw *);
35716@@ -330,6 +331,7 @@ struct e1000_phy_operations {
35717 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
35718 s32 (*write_reg)(struct e1000_hw *, u32, u16);
66a7e928 35719 };
15a11c5b 35720+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
66a7e928 35721
35722 struct e1000_nvm_operations {
35723 s32 (*acquire)(struct e1000_hw *);
35724@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
35725 s32 (*update)(struct e1000_hw *);
35726 s32 (*validate)(struct e1000_hw *);
66a7e928 35727 };
15a11c5b 35728+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35729
35730 struct e1000_info {
35731 s32 (*get_invariants)(struct e1000_hw *);
15a11c5b 35732@@ -350,7 +353,7 @@ struct e1000_info {
ae4e228f 35733 extern const struct e1000_info e1000_82575_info;
35734
35735 struct e1000_mac_info {
35736- struct e1000_mac_operations ops;
35737+ e1000_mac_operations_no_const ops;
35738
35739 u8 addr[6];
35740 u8 perm_addr[6];
35741@@ -388,7 +391,7 @@ struct e1000_mac_info {
35742 };
35743
35744 struct e1000_phy_info {
35745- struct e1000_phy_operations ops;
35746+ e1000_phy_operations_no_const ops;
35747
35748 enum e1000_phy_type type;
35749
35750@@ -423,7 +426,7 @@ struct e1000_phy_info {
35751 };
35752
35753 struct e1000_nvm_info {
35754- struct e1000_nvm_operations ops;
35755+ e1000_nvm_operations_no_const ops;
ae4e228f 35756 enum e1000_nvm_type type;
66a7e928 35757 enum e1000_nvm_override override;
35758
35759@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
35760 s32 (*check_for_ack)(struct e1000_hw *, u16);
35761 s32 (*check_for_rst)(struct e1000_hw *, u16);
35762 };
35763+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35764
35765 struct e1000_mbx_stats {
35766 u32 msgs_tx;
35767@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
35768 };
35769
35770 struct e1000_mbx_info {
35771- struct e1000_mbx_operations ops;
35772+ e1000_mbx_operations_no_const ops;
35773 struct e1000_mbx_stats stats;
35774 u32 timeout;
35775 u32 usec_delay;
4c928ab7 35776diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
5e856224 35777index 57db3c6..aa825fc 100644
35778--- a/drivers/net/ethernet/intel/igbvf/vf.h
35779+++ b/drivers/net/ethernet/intel/igbvf/vf.h
35780@@ -189,9 +189,10 @@ struct e1000_mac_operations {
35781 s32 (*read_mac_addr)(struct e1000_hw *);
35782 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35783 };
35784+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35785
bc901d79 35786 struct e1000_mac_info {
35787- struct e1000_mac_operations ops;
35788+ e1000_mac_operations_no_const ops;
35789 u8 addr[6];
35790 u8 perm_addr[6];
66a7e928 35791
35792@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35793 s32 (*check_for_ack)(struct e1000_hw *);
35794 s32 (*check_for_rst)(struct e1000_hw *);
35795 };
35796+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
66a7e928 35797
35798 struct e1000_mbx_stats {
35799 u32 msgs_tx;
35800@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35801 };
66a7e928 35802
35803 struct e1000_mbx_info {
35804- struct e1000_mbx_operations ops;
35805+ e1000_mbx_operations_no_const ops;
35806 struct e1000_mbx_stats stats;
35807 u32 timeout;
35808 u32 usec_delay;
4c928ab7 35809diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
5e856224 35810index 9b95bef..7e254ee 100644
35811--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35812+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35813@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
35814 s32 (*update_checksum)(struct ixgbe_hw *);
35815 u16 (*calc_checksum)(struct ixgbe_hw *);
66a7e928 35816 };
15a11c5b 35817+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
66a7e928 35818
35819 struct ixgbe_mac_operations {
35820 s32 (*init_hw)(struct ixgbe_hw *);
4c928ab7 35821@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
35822 /* Manageability interface */
35823 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
66a7e928 35824 };
15a11c5b 35825+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 35826
35827 struct ixgbe_phy_operations {
35828 s32 (*identify)(struct ixgbe_hw *);
4c928ab7 35829@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35830 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35831 s32 (*check_overtemp)(struct ixgbe_hw *);
66a7e928 35832 };
15a11c5b 35833+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35834
35835 struct ixgbe_eeprom_info {
35836- struct ixgbe_eeprom_operations ops;
15a11c5b 35837+ ixgbe_eeprom_operations_no_const ops;
35838 enum ixgbe_eeprom_type type;
35839 u32 semaphore_delay;
35840 u16 word_size;
4c928ab7 35841@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35842
35843 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35844 struct ixgbe_mac_info {
35845- struct ixgbe_mac_operations ops;
15a11c5b 35846+ ixgbe_mac_operations_no_const ops;
66a7e928 35847 enum ixgbe_mac_type type;
35848 u8 addr[ETH_ALEN];
35849 u8 perm_addr[ETH_ALEN];
4c928ab7 35850@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35851 };
35852
35853 struct ixgbe_phy_info {
35854- struct ixgbe_phy_operations ops;
15a11c5b 35855+ ixgbe_phy_operations_no_const ops;
35856 struct mdio_if_info mdio;
35857 enum ixgbe_phy_type type;
35858 u32 id;
4c928ab7 35859@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35860 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35861 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
66a7e928 35862 };
15a11c5b 35863+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
66a7e928 35864
35865 struct ixgbe_mbx_stats {
35866 u32 msgs_tx;
4c928ab7 35867@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
15a11c5b 35868 };
66a7e928 35869
35870 struct ixgbe_mbx_info {
35871- struct ixgbe_mbx_operations ops;
35872+ ixgbe_mbx_operations_no_const ops;
35873 struct ixgbe_mbx_stats stats;
35874 u32 timeout;
35875 u32 usec_delay;
4c928ab7 35876diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
5e856224 35877index 25c951d..cc7cf33 100644
35878--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35879+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35880@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35881 s32 (*clear_vfta)(struct ixgbe_hw *);
35882 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
66a7e928 35883 };
15a11c5b 35884+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 35885
35886 enum ixgbe_mac_type {
35887 ixgbe_mac_unknown = 0,
35888@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35889 };
35890
35891 struct ixgbe_mac_info {
35892- struct ixgbe_mac_operations ops;
35893+ ixgbe_mac_operations_no_const ops;
35894 u8 addr[6];
35895 u8 perm_addr[6];
35896
35897@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35898 s32 (*check_for_ack)(struct ixgbe_hw *);
35899 s32 (*check_for_rst)(struct ixgbe_hw *);
66a7e928 35900 };
35901+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35902
35903 struct ixgbe_mbx_stats {
35904 u32 msgs_tx;
35905@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35906 };
35907
35908 struct ixgbe_mbx_info {
35909- struct ixgbe_mbx_operations ops;
35910+ ixgbe_mbx_operations_no_const ops;
35911 struct ixgbe_mbx_stats stats;
35912 u32 timeout;
35913 u32 udelay;
4c928ab7 35914diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
5e856224 35915index 8bf22b6..7f5baaa 100644
35916--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35917+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
5e856224 35918@@ -41,6 +41,7 @@
35919 #include <linux/slab.h>
35920 #include <linux/io-mapping.h>
5e856224 35921 #include <linux/delay.h>
35922+#include <linux/sched.h>
35923
35924 #include <linux/mlx4/device.h>
35925 #include <linux/mlx4/doorbell.h>
35926diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35927index 5046a64..71ca936 100644
35928--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35929+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35930@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35931 void (*link_down)(struct __vxge_hw_device *devh);
35932 void (*crit_err)(struct __vxge_hw_device *devh,
35933 enum vxge_hw_event type, u64 ext_data);
35934-};
35935+} __no_const;
15a11c5b 35936
35937 /*
35938 * struct __vxge_hw_blockpool_entry - Block private data structure
35939diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35940index 4a518a3..936b334 100644
35941--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35942+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35943@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35944 struct vxge_hw_mempool_dma *dma_object,
35945 u32 index,
35946 u32 is_last);
35947-};
35948+} __no_const;
15a11c5b 35949
35950 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35951 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35952diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
5e856224 35953index bbacb37..d60887d 100644
35954--- a/drivers/net/ethernet/realtek/r8169.c
35955+++ b/drivers/net/ethernet/realtek/r8169.c
5e856224 35956@@ -695,17 +695,17 @@ struct rtl8169_private {
35957 struct mdio_ops {
35958 void (*write)(void __iomem *, int, int);
35959 int (*read)(void __iomem *, int);
35960- } mdio_ops;
35961+ } __no_const mdio_ops;
15a11c5b 35962
35963 struct pll_power_ops {
35964 void (*down)(struct rtl8169_private *);
35965 void (*up)(struct rtl8169_private *);
35966- } pll_power_ops;
35967+ } __no_const pll_power_ops;
15a11c5b 35968
35969 struct jumbo_ops {
35970 void (*enable)(struct rtl8169_private *);
35971 void (*disable)(struct rtl8169_private *);
35972- } jumbo_ops;
35973+ } __no_const jumbo_ops;
15a11c5b 35974
35975 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35976 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35977diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
5e856224 35978index 5b118cd..858b523 100644
35979--- a/drivers/net/ethernet/sis/sis190.c
35980+++ b/drivers/net/ethernet/sis/sis190.c
5e856224 35981@@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35982 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35983 struct net_device *dev)
15a11c5b 35984 {
35985- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35986+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35987 struct sis190_private *tp = netdev_priv(dev);
35988 struct pci_dev *isa_bridge;
35989 u8 reg, tmp8;
35990diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
5e856224 35991index c07cfe9..81cbf7e 100644
35992--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35993+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
5e856224 35994@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
15a11c5b 35995
4c928ab7 35996 writel(value, ioaddr + MMC_CNTRL);
15a11c5b 35997
35998- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35999- MMC_CNTRL, value);
36000+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36001+// MMC_CNTRL, value);
36002 }
36003
4c928ab7 36004 /* To mask all all interrupts.*/
36005diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
36006index dec5836..6d4db7d 100644
36007--- a/drivers/net/hyperv/hyperv_net.h
36008+++ b/drivers/net/hyperv/hyperv_net.h
36009@@ -97,7 +97,7 @@ struct rndis_device {
36010
36011 enum rndis_device_state state;
36012 bool link_state;
36013- atomic_t new_req_id;
36014+ atomic_unchecked_t new_req_id;
36015
36016 spinlock_t request_lock;
36017 struct list_head req_list;
36018diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
36019index 133b7fb..d58c559 100644
36020--- a/drivers/net/hyperv/rndis_filter.c
36021+++ b/drivers/net/hyperv/rndis_filter.c
36022@@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36023 * template
36024 */
36025 set = &rndis_msg->msg.set_req;
36026- set->req_id = atomic_inc_return(&dev->new_req_id);
36027+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36028
36029 /* Add to the request list */
36030 spin_lock_irqsave(&dev->request_lock, flags);
36031@@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36032
36033 /* Setup the rndis set */
36034 halt = &request->request_msg.msg.halt_req;
36035- halt->req_id = atomic_inc_return(&dev->new_req_id);
36036+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36037
36038 /* Ignore return since this msg is optional. */
36039 rndis_filter_send_request(dev, request);
36040diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36041index 58dc117..f140c77 100644
36042--- a/drivers/net/macvtap.c
36043+++ b/drivers/net/macvtap.c
36044@@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
36045 }
36046 base = (unsigned long)from->iov_base + offset1;
36047 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
36048+ if (i + size >= MAX_SKB_FRAGS)
36049+ return -EFAULT;
36050 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
36051 if ((num_pages != size) ||
36052 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
4c928ab7 36053diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
5e856224 36054index 3ed983c..a1bb418 100644
36055--- a/drivers/net/ppp/ppp_generic.c
36056+++ b/drivers/net/ppp/ppp_generic.c
5e856224 36057@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36058 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36059 struct ppp_stats stats;
36060 struct ppp_comp_stats cstats;
36061- char *vers;
36062
36063 switch (cmd) {
36064 case SIOCGPPPSTATS:
5e856224 36065@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b 36066 break;
66a7e928 36067
36068 case SIOCGPPPVER:
36069- vers = PPP_VERSION;
36070- if (copy_to_user(addr, vers, strlen(vers) + 1))
36071+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36072 break;
36073 err = 0;
36074 break;
36075diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
36076index 515f122..41dd273 100644
36077--- a/drivers/net/tokenring/abyss.c
36078+++ b/drivers/net/tokenring/abyss.c
36079@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
36080
36081 static int __init abyss_init (void)
36082 {
36083- abyss_netdev_ops = tms380tr_netdev_ops;
36084+ pax_open_kernel();
36085+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36086
36087- abyss_netdev_ops.ndo_open = abyss_open;
36088- abyss_netdev_ops.ndo_stop = abyss_close;
36089+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
36090+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
36091+ pax_close_kernel();
36092
36093 return pci_register_driver(&abyss_driver);
36094 }
36095diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
36096index 6153cfd..cf69c1c 100644
36097--- a/drivers/net/tokenring/madgemc.c
36098+++ b/drivers/net/tokenring/madgemc.c
36099@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
36100
36101 static int __init madgemc_init (void)
36102 {
36103- madgemc_netdev_ops = tms380tr_netdev_ops;
36104- madgemc_netdev_ops.ndo_open = madgemc_open;
36105- madgemc_netdev_ops.ndo_stop = madgemc_close;
36106+ pax_open_kernel();
36107+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36108+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
36109+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
36110+ pax_close_kernel();
36111
36112 return mca_register_driver (&madgemc_driver);
36113 }
36114diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
36115index 8d362e6..f91cc52 100644
36116--- a/drivers/net/tokenring/proteon.c
36117+++ b/drivers/net/tokenring/proteon.c
36118@@ -353,9 +353,11 @@ static int __init proteon_init(void)
36119 struct platform_device *pdev;
36120 int i, num = 0, err = 0;
36121
36122- proteon_netdev_ops = tms380tr_netdev_ops;
36123- proteon_netdev_ops.ndo_open = proteon_open;
36124- proteon_netdev_ops.ndo_stop = tms380tr_close;
36125+ pax_open_kernel();
36126+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36127+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
36128+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
36129+ pax_close_kernel();
36130
36131 err = platform_driver_register(&proteon_driver);
36132 if (err)
36133diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
36134index 46db5c5..37c1536 100644
36135--- a/drivers/net/tokenring/skisa.c
36136+++ b/drivers/net/tokenring/skisa.c
36137@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
36138 struct platform_device *pdev;
36139 int i, num = 0, err = 0;
36140
36141- sk_isa_netdev_ops = tms380tr_netdev_ops;
36142- sk_isa_netdev_ops.ndo_open = sk_isa_open;
36143- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
36144+ pax_open_kernel();
36145+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36146+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
36147+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
36148+ pax_close_kernel();
36149
36150 err = platform_driver_register(&sk_isa_driver);
36151 if (err)
fe2de317 36152diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
5e856224 36153index e1324b4..e1b0041 100644
36154--- a/drivers/net/usb/hso.c
36155+++ b/drivers/net/usb/hso.c
36156@@ -71,7 +71,7 @@
36157 #include <asm/byteorder.h>
36158 #include <linux/serial_core.h>
36159 #include <linux/serial.h>
36160-
36161+#include <asm/local.h>
36162
36163 #define MOD_AUTHOR "Option Wireless"
36164 #define MOD_DESCRIPTION "USB High Speed Option driver"
6892158b 36165@@ -257,7 +257,7 @@ struct hso_serial {
36166
36167 /* from usb_serial_port */
36168 struct tty_struct *tty;
36169- int open_count;
c52201e0 36170+ local_t open_count;
36171 spinlock_t serial_lock;
36172
36173 int (*write_data) (struct hso_serial *serial);
fe2de317 36174@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36175 struct urb *urb;
36176
36177 urb = serial->rx_urb[0];
36178- if (serial->open_count > 0) {
c52201e0 36179+ if (local_read(&serial->open_count) > 0) {
36180 count = put_rxbuf_data(urb, serial);
36181 if (count == -1)
36182 return;
fe2de317 36183@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36184 DUMP1(urb->transfer_buffer, urb->actual_length);
36185
36186 /* Anyone listening? */
36187- if (serial->open_count == 0)
c52201e0 36188+ if (local_read(&serial->open_count) == 0)
36189 return;
36190
36191 if (status == 0) {
fe2de317 36192@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36193 spin_unlock_irq(&serial->serial_lock);
36194
36195 /* check for port already opened, if not set the termios */
36196- serial->open_count++;
36197- if (serial->open_count == 1) {
c52201e0 36198+ if (local_inc_return(&serial->open_count) == 1) {
36199 serial->rx_state = RX_IDLE;
36200 /* Force default termio settings */
57199397 36201 _hso_serial_set_termios(tty, NULL);
fe2de317 36202@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36203 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36204 if (result) {
36205 hso_stop_serial_device(serial->parent);
36206- serial->open_count--;
c52201e0 36207+ local_dec(&serial->open_count);
36208 kref_put(&serial->parent->ref, hso_serial_ref_free);
36209 }
36210 } else {
fe2de317 36211@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36212
36213 /* reset the rts and dtr */
36214 /* do the actual close */
36215- serial->open_count--;
c52201e0 36216+ local_dec(&serial->open_count);
ae4e228f 36217
36218- if (serial->open_count <= 0) {
36219- serial->open_count = 0;
36220+ if (local_read(&serial->open_count) <= 0) {
36221+ local_set(&serial->open_count, 0);
36222 spin_lock_irq(&serial->serial_lock);
36223 if (serial->tty == tty) {
36224 serial->tty->driver_data = NULL;
fe2de317 36225@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36226
36227 /* the actual setup */
36228 spin_lock_irqsave(&serial->serial_lock, flags);
36229- if (serial->open_count)
c52201e0 36230+ if (local_read(&serial->open_count))
58c5fc13
MT
36231 _hso_serial_set_termios(tty, old);
36232 else
36233 tty->termios = old;
fe2de317 36234@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
36235 D1("Pending read interrupt on port %d\n", i);
36236 spin_lock(&serial->serial_lock);
36237 if (serial->rx_state == RX_IDLE &&
36238- serial->open_count > 0) {
c52201e0 36239+ local_read(&serial->open_count) > 0) {
36240 /* Setup and send a ctrl req read on
36241 * port i */
36242 if (!serial->rx_urb_filled[0]) {
fe2de317 36243@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
36244 /* Start all serial ports */
36245 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36246 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36247- if (dev2ser(serial_table[i])->open_count) {
c52201e0 36248+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
36249 result =
36250 hso_start_serial_device(serial_table[i], GFP_NOIO);
36251 hso_kick_transmit(dev2ser(serial_table[i]));
fe2de317 36252diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
5e856224 36253index efc0111..79c8f5b 100644
36254--- a/drivers/net/wireless/ath/ath.h
36255+++ b/drivers/net/wireless/ath/ath.h
4c928ab7 36256@@ -119,6 +119,7 @@ struct ath_ops {
36257 void (*write_flush) (void *);
36258 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
36259 };
36260+typedef struct ath_ops __no_const ath_ops_no_const;
36261
36262 struct ath_common;
36263 struct ath_bus_ops;
36264diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
4c928ab7 36265index 8c5ce8b..abf101b 100644
36266--- a/drivers/net/wireless/ath/ath5k/debug.c
36267+++ b/drivers/net/wireless/ath/ath5k/debug.c
36268@@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36269
36270 static ssize_t write_file_debug(struct file *file,
36271 const char __user *userbuf,
36272+ size_t count, loff_t *ppos) __size_overflow(3);
36273+static ssize_t write_file_debug(struct file *file,
36274+ const char __user *userbuf,
36275 size_t count, loff_t *ppos)
36276 {
36277 struct ath5k_hw *ah = file->private_data;
36278diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
5e856224 36279index 7b6417b..ab5db98 100644
36280--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36281+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36282@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36283 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36284 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36285
36286- ACCESS_ONCE(ads->ds_link) = i->link;
36287- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36288+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
36289+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36290
36291 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36292 ctl6 = SM(i->keytype, AR_EncrType);
36293@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36294
36295 if ((i->is_first || i->is_last) &&
36296 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36297- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36298+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36299 | set11nTries(i->rates, 1)
36300 | set11nTries(i->rates, 2)
36301 | set11nTries(i->rates, 3)
36302 | (i->dur_update ? AR_DurUpdateEna : 0)
36303 | SM(0, AR_BurstDur);
36304
36305- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36306+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36307 | set11nRate(i->rates, 1)
36308 | set11nRate(i->rates, 2)
36309 | set11nRate(i->rates, 3);
36310 } else {
36311- ACCESS_ONCE(ads->ds_ctl2) = 0;
36312- ACCESS_ONCE(ads->ds_ctl3) = 0;
36313+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36314+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36315 }
36316
36317 if (!i->is_first) {
36318- ACCESS_ONCE(ads->ds_ctl0) = 0;
36319- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36320- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36321+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36322+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36323+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36324 return;
36325 }
66a7e928 36326
36327@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36328 break;
36329 }
66a7e928 36330
36331- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36332+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36333 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36334 | SM(i->txpower, AR_XmitPower)
36335 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36336@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36337 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36338 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
66a7e928 36339
36340- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36341- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36342+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36343+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
15a11c5b 36344
36345 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36346 return;
15a11c5b 36347
36348- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36349+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36350 | set11nPktDurRTSCTS(i->rates, 1);
36351
36352- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36353+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36354 | set11nPktDurRTSCTS(i->rates, 3);
36355
36356- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36357+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36358 | set11nRateFlags(i->rates, 1)
36359 | set11nRateFlags(i->rates, 2)
36360 | set11nRateFlags(i->rates, 3)
36361diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
5e856224 36362index 09b8c9d..905339e 100644
36363--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36364+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36365@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36366 (i->qcu << AR_TxQcuNum_S) | 0x17;
36367
36368 checksum += val;
36369- ACCESS_ONCE(ads->info) = val;
36370+ ACCESS_ONCE_RW(ads->info) = val;
36371
36372 checksum += i->link;
36373- ACCESS_ONCE(ads->link) = i->link;
36374+ ACCESS_ONCE_RW(ads->link) = i->link;
36375
36376 checksum += i->buf_addr[0];
36377- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
36378+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
36379 checksum += i->buf_addr[1];
36380- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
36381+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
36382 checksum += i->buf_addr[2];
36383- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36384+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36385 checksum += i->buf_addr[3];
36386- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36387+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36388
36389 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36390- ACCESS_ONCE(ads->ctl3) = val;
36391+ ACCESS_ONCE_RW(ads->ctl3) = val;
36392 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36393- ACCESS_ONCE(ads->ctl5) = val;
36394+ ACCESS_ONCE_RW(ads->ctl5) = val;
36395 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36396- ACCESS_ONCE(ads->ctl7) = val;
36397+ ACCESS_ONCE_RW(ads->ctl7) = val;
36398 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36399- ACCESS_ONCE(ads->ctl9) = val;
36400+ ACCESS_ONCE_RW(ads->ctl9) = val;
36401
36402 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36403- ACCESS_ONCE(ads->ctl10) = checksum;
36404+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
36405
36406 if (i->is_first || i->is_last) {
36407- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36408+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36409 | set11nTries(i->rates, 1)
36410 | set11nTries(i->rates, 2)
36411 | set11nTries(i->rates, 3)
36412 | (i->dur_update ? AR_DurUpdateEna : 0)
36413 | SM(0, AR_BurstDur);
36414
36415- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36416+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36417 | set11nRate(i->rates, 1)
36418 | set11nRate(i->rates, 2)
36419 | set11nRate(i->rates, 3);
36420 } else {
36421- ACCESS_ONCE(ads->ctl13) = 0;
36422- ACCESS_ONCE(ads->ctl14) = 0;
36423+ ACCESS_ONCE_RW(ads->ctl13) = 0;
36424+ ACCESS_ONCE_RW(ads->ctl14) = 0;
36425 }
36426
36427 ads->ctl20 = 0;
36428@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36429
36430 ctl17 = SM(i->keytype, AR_EncrType);
36431 if (!i->is_first) {
36432- ACCESS_ONCE(ads->ctl11) = 0;
36433- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36434- ACCESS_ONCE(ads->ctl15) = 0;
36435- ACCESS_ONCE(ads->ctl16) = 0;
36436- ACCESS_ONCE(ads->ctl17) = ctl17;
36437- ACCESS_ONCE(ads->ctl18) = 0;
36438- ACCESS_ONCE(ads->ctl19) = 0;
36439+ ACCESS_ONCE_RW(ads->ctl11) = 0;
36440+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36441+ ACCESS_ONCE_RW(ads->ctl15) = 0;
36442+ ACCESS_ONCE_RW(ads->ctl16) = 0;
36443+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36444+ ACCESS_ONCE_RW(ads->ctl18) = 0;
36445+ ACCESS_ONCE_RW(ads->ctl19) = 0;
36446 return;
36447 }
66a7e928 36448
36449- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36450+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36451 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36452 | SM(i->txpower, AR_XmitPower)
36453 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36454@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36455 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36456 ctl12 |= SM(val, AR_PAPRDChainMask);
66a7e928 36457
36458- ACCESS_ONCE(ads->ctl12) = ctl12;
36459- ACCESS_ONCE(ads->ctl17) = ctl17;
36460+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36461+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
15a11c5b 36462
36463- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36464+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36465 | set11nPktDurRTSCTS(i->rates, 1);
15a11c5b 36466
36467- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36468+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36469 | set11nPktDurRTSCTS(i->rates, 3);
66a7e928 36470
36471- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36472+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36473 | set11nRateFlags(i->rates, 1)
36474 | set11nRateFlags(i->rates, 2)
36475 | set11nRateFlags(i->rates, 3)
36476 | SM(i->rtscts_rate, AR_RTSCTSRate);
66a7e928 36477
36478- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36479+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36480 }
15a11c5b 36481
36482 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
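The ar9002/ar9003 hunks above rename plain ACCESS_ONCE() stores to ACCESS_ONCE_RW(): under the constification changes elsewhere in this patch, ACCESS_ONCE() becomes a read-only (const volatile) access, so descriptor fields that really are written need the explicit read-write variant. A minimal user-space sketch of that split, assuming definitions along the lines the patch uses:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))   /* read-only view */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))         /* writable view  */

struct tx_desc { unsigned int ds_link; unsigned int ds_data; };

int main(void)
{
        struct tx_desc d = { 0, 0 };

        ACCESS_ONCE_RW(d.ds_link) = 0x1234;   /* compiles: volatile, non-const lvalue  */
        /* ACCESS_ONCE(d.ds_data) = 1;           would not compile: const lvalue       */
        printf("%#x %#x\n", ACCESS_ONCE(d.ds_link), ACCESS_ONCE(d.ds_data));
        return 0;
}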
36483diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
5e856224 36484index 68d972b..1d9205b 100644
36485--- a/drivers/net/wireless/ath/ath9k/debug.c
36486+++ b/drivers/net/wireless/ath/ath9k/debug.c
36487@@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36488 }
15a11c5b 36489
36490 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36491+ size_t count, loff_t *ppos) __size_overflow(3);
36492+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36493 size_t count, loff_t *ppos)
36494 {
36495 struct ath_softc *sc = file->private_data;
36496diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36497index d3ff33c..c98bcda 100644
36498--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36499+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36500@@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36501 }
15a11c5b 36502
36503 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36504+ size_t count, loff_t *ppos) __size_overflow(3);
36505+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36506 size_t count, loff_t *ppos)
36507 {
36508 struct ath9k_htc_priv *priv = file->private_data;
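Both write_file_debug() hunks above add a second declaration carrying __size_overflow(3), which tells grsecurity's size_overflow GCC plugin to instrument arithmetic on the size parameter (count). A stand-alone sketch of the declaration pattern; the attribute comes from the plugin, so it is stubbed to nothing here and the parameter index simply names the size argument of this simplified signature:

#include <stdio.h>
#include <sys/types.h>

#ifndef __size_overflow
#define __size_overflow(...)   /* real checking is done by the size_overflow plugin */
#endif

/* Declare first with the annotation, then define, mirroring the patch. */
static ssize_t write_file_debug(const char *user_buf, size_t count) __size_overflow(2);

static ssize_t write_file_debug(const char *user_buf, size_t count)
{
        (void)user_buf;
        return (ssize_t)count;  /* pretend everything was written */
}

int main(void)
{
        printf("%zd\n", write_file_debug("tpc", 3));
        return 0;
}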
fe2de317 36509diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
5e856224 36510index c8261d4..8d88929 100644
36511--- a/drivers/net/wireless/ath/ath9k/hw.h
36512+++ b/drivers/net/wireless/ath/ath9k/hw.h
5e856224 36513@@ -773,7 +773,7 @@ struct ath_hw_private_ops {
36514
36515 /* ANI */
36516 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36517-};
36518+} __no_const;
36519
36520 /**
36521 * struct ath_hw_ops - callbacks used by hardware code and driver code
5e856224 36522@@ -803,7 +803,7 @@ struct ath_hw_ops {
36523 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36524 struct ath_hw_antcomb_conf *antconf);
36525
36526-};
36527+} __no_const;
36528
36529 struct ath_nf_limits {
36530 s16 max;
5e856224 36531@@ -823,7 +823,7 @@ enum ath_cal_list {
4c928ab7 36532 #define AH_FASTCC 0x4
36533
36534 struct ath_hw {
36535- struct ath_ops reg_ops;
36536+ ath_ops_no_const reg_ops;
36537
36538 struct ieee80211_hw *hw;
36539 struct ath_common common;
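struct ath_hw_private_ops, struct ath_hw_ops and the reg_ops member above are all the same story: the constify plugin normally turns ops structures that contain only function pointers into const objects, and __no_const (or a *_no_const typedef such as ath_ops_no_const) opts out the ones the driver still has to fill in at run time. Roughly, with the attribute stubbed because it is plugin-provided:

#ifndef __no_const
#define __no_const      /* supplied by the PaX constify GCC plugin in a real build */
#endif

/* Would normally be constified: every member is a function pointer. */
struct hw_ops {
        int (*reset)(void);
        int (*calibrate)(void);
} __no_const;           /* keep it writable so callbacks can be patched later */

typedef struct hw_ops hw_ops_no_const;   /* same idea as ath_ops_no_const */

int main(void)
{
        static hw_ops_no_const ops;      /* not const, so this assignment is fine */
        ops.reset = 0;
        return 0;
}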
4c928ab7 36540diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
5e856224 36541index af00e2c..ab04d34 100644
36542--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36543+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
5e856224 36544@@ -545,7 +545,7 @@ struct phy_func_ptr {
36545 void (*carrsuppr)(struct brcms_phy *);
36546 s32 (*rxsigpwr)(struct brcms_phy *, s32);
36547 void (*detach)(struct brcms_phy *);
36548-};
36549+} __no_const;
66a7e928 36550
36551 struct brcms_phy {
36552 struct brcms_phy_pub pubpi_ro;
36553diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
36554index a2ec369..36fdf14 100644
36555--- a/drivers/net/wireless/iwlegacy/3945-mac.c
36556+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
36557@@ -3646,7 +3646,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15a11c5b 36558 */
36559 if (il3945_mod_params.disable_hw_scan) {
36560 D_INFO("Disabling hw_scan\n");
36561- il3945_hw_ops.hw_scan = NULL;
15a11c5b 36562+ pax_open_kernel();
5e856224 36563+ *(void **)&il3945_hw_ops.hw_scan = NULL;
15a11c5b 36564+ pax_close_kernel();
66a7e928 36565 }
66a7e928 36566
5e856224 36567 D_INFO("*** LOAD DRIVER ***\n");
fe2de317 36568diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
5e856224 36569index f8fc239..8cade22 100644
36570--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
36571+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
5e856224 36572@@ -86,8 +86,8 @@ do { \
36573 } while (0)
36574
36575 #else
36576-#define IWL_DEBUG(m, level, fmt, args...)
36577-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
36578+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
36579+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
36580 #define iwl_print_hex_dump(m, level, p, len)
36581 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
36582 do { \
fe2de317 36583diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
5e856224 36584index 4b9e730..7603659 100644
36585--- a/drivers/net/wireless/mac80211_hwsim.c
36586+++ b/drivers/net/wireless/mac80211_hwsim.c
5e856224 36587@@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
15a11c5b 36588 return -EINVAL;
66a7e928 36589
36590 if (fake_hw_scan) {
36591- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36592- mac80211_hwsim_ops.sw_scan_start = NULL;
36593- mac80211_hwsim_ops.sw_scan_complete = NULL;
36594+ pax_open_kernel();
36595+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36596+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
36597+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
36598+ pax_close_kernel();
36599 }
ae4e228f 36600
15a11c5b 36601 spin_lock_init(&hwsim_radio_lock);
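The hwsim hunk above (like the il3945 one before it) cannot simply assign to mac80211_hwsim_ops once such ops structures land in a read-only section, so the patch brackets the writes with pax_open_kernel()/pax_close_kernel() and goes through a *(void **)& cast. A user-space sketch of that open/patch/close discipline, with the two helpers approximated by mprotect() on a page holding the ops table (an illustration only, not the kernel implementation):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*hw_scan)(void); };

static void real_hw_scan(void) { puts("hw_scan called"); }

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *o = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (o == MAP_FAILED)
                return 1;

        memset(o, 0, sizeof(*o));
        mprotect(o, pg, PROT_READ);                 /* the "const" ops section      */

        mprotect(o, pg, PROT_READ | PROT_WRITE);    /* pax_open_kernel() stand-in   */
        *(void **)&o->hw_scan = (void *)real_hw_scan;
        mprotect(o, pg, PROT_READ);                 /* pax_close_kernel() stand-in  */

        o->hw_scan();
        return 0;
}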
fe2de317 36602diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
5e856224 36603index 3186aa4..b35b09f 100644
36604--- a/drivers/net/wireless/mwifiex/main.h
36605+++ b/drivers/net/wireless/mwifiex/main.h
5e856224 36606@@ -536,7 +536,7 @@ struct mwifiex_if_ops {
6e9df6a3 36607 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
36608 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
36609 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
36610-};
36611+} __no_const;
36612
36613 struct mwifiex_adapter {
4c928ab7 36614 u8 iface_type;
fe2de317 36615diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
5e856224 36616index a330c69..a81540f 100644
36617--- a/drivers/net/wireless/rndis_wlan.c
36618+++ b/drivers/net/wireless/rndis_wlan.c
5e856224 36619@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
36620
36621 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
36622
36623- if (rts_threshold < 0 || rts_threshold > 2347)
36624+ if (rts_threshold > 2347)
36625 rts_threshold = 2347;
36626
36627 tmp = cpu_to_le32(rts_threshold);
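rts_threshold is a u32 here, so the dropped "rts_threshold < 0" test could never be true; only the "> 2347" clamp can fire. A two-line illustration of why compilers flag such tests:

#include <stdio.h>

int main(void)
{
        unsigned int rts = 0;
        /* Always false for an unsigned value; -Wtype-limits warns about exactly this. */
        printf("%d\n", rts < 0);
        return 0;
}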
36628diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
36629index a77f1bb..c608b2b 100644
36630--- a/drivers/net/wireless/wl1251/wl1251.h
36631+++ b/drivers/net/wireless/wl1251/wl1251.h
36632@@ -266,7 +266,7 @@ struct wl1251_if_operations {
36633 void (*reset)(struct wl1251 *wl);
36634 void (*enable_irq)(struct wl1251 *wl);
36635 void (*disable_irq)(struct wl1251 *wl);
36636-};
36637+} __no_const;
36638
36639 struct wl1251 {
36640 struct ieee80211_hw *hw;
36641diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
36642index f34b5b2..b5abb9f 100644
36643--- a/drivers/oprofile/buffer_sync.c
36644+++ b/drivers/oprofile/buffer_sync.c
36645@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
36646 if (cookie == NO_COOKIE)
36647 offset = pc;
36648 if (cookie == INVALID_COOKIE) {
36649- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36650+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36651 offset = pc;
36652 }
36653 if (cookie != last_cookie) {
fe2de317 36654@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
36655 /* add userspace sample */
36656
36657 if (!mm) {
36658- atomic_inc(&oprofile_stats.sample_lost_no_mm);
36659+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36660 return 0;
36661 }
36662
36663 cookie = lookup_dcookie(mm, s->eip, &offset);
36664
36665 if (cookie == INVALID_COOKIE) {
36666- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36667+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36668 return 0;
36669 }
36670
15a11c5b 36671@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
36672 /* ignore backtraces if failed to add a sample */
36673 if (state == sb_bt_start) {
36674 state = sb_bt_ignore;
36675- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36676+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36677 }
36678 }
36679 release_mm(mm);
fe2de317 36680diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
4c928ab7 36681index c0cc4e7..44d4e54 100644
36682--- a/drivers/oprofile/event_buffer.c
36683+++ b/drivers/oprofile/event_buffer.c
36684@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
36685 }
36686
36687 if (buffer_pos == buffer_size) {
36688- atomic_inc(&oprofile_stats.event_lost_overflow);
36689+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36690 return;
36691 }
36692
fe2de317 36693diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
5e856224 36694index ed2c3ec..deda85a 100644
36695--- a/drivers/oprofile/oprof.c
36696+++ b/drivers/oprofile/oprof.c
36697@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
36698 if (oprofile_ops.switch_events())
36699 return;
58c5fc13 36700
36701- atomic_inc(&oprofile_stats.multiplex_counter);
36702+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36703 start_switch_worker();
36704 }
58c5fc13 36705
36706diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
36707index 84a208d..f07d177 100644
36708--- a/drivers/oprofile/oprofile_files.c
36709+++ b/drivers/oprofile/oprofile_files.c
36710@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
36711
36712
36713 static ssize_t timeout_write(struct file *file, char const __user *buf,
36714+ size_t count, loff_t *offset) __size_overflow(3);
36715+static ssize_t timeout_write(struct file *file, char const __user *buf,
36716 size_t count, loff_t *offset)
36717 {
36718 unsigned long val;
36719@@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
36720 }
36721
36722
36723+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36724 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36725 {
36726 unsigned long val;
36727@@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = {
36728 };
36729
36730
36731+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36732 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
36733 {
36734 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
36735 }
36736
36737
36738+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36739 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36740 {
36741 unsigned long val;
36742diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
36743index 917d28e..d62d981 100644
36744--- a/drivers/oprofile/oprofile_stats.c
36745+++ b/drivers/oprofile/oprofile_stats.c
ae4e228f 36746@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36747 cpu_buf->sample_invalid_eip = 0;
36748 }
36749
36750- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36751- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36752- atomic_set(&oprofile_stats.event_lost_overflow, 0);
36753- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 36754- atomic_set(&oprofile_stats.multiplex_counter, 0);
36755+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36756+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36757+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36758+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 36759+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36760 }
36761
36762
36763diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
36764index 38b6fc0..b5cbfce 100644
36765--- a/drivers/oprofile/oprofile_stats.h
36766+++ b/drivers/oprofile/oprofile_stats.h
ae4e228f 36767@@ -13,11 +13,11 @@
6e9df6a3 36768 #include <linux/atomic.h>
36769
36770 struct oprofile_stat_struct {
36771- atomic_t sample_lost_no_mm;
36772- atomic_t sample_lost_no_mapping;
36773- atomic_t bt_lost_no_mapping;
36774- atomic_t event_lost_overflow;
ae4e228f 36775- atomic_t multiplex_counter;
36776+ atomic_unchecked_t sample_lost_no_mm;
36777+ atomic_unchecked_t sample_lost_no_mapping;
36778+ atomic_unchecked_t bt_lost_no_mapping;
36779+ atomic_unchecked_t event_lost_overflow;
ae4e228f 36780+ atomic_unchecked_t multiplex_counter;
36781 };
36782
36783 extern struct oprofile_stat_struct oprofile_stats;
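All of the oprofile statistics above move from atomic_t to atomic_unchecked_t. The point of the split (introduced by this patch, not by upstream) is that PaX's REFCOUNT hardening makes ordinary atomic_t increments trap on overflow, so counters that are genuinely allowed to wrap use the _unchecked variants instead. A rough user-space model of the distinction, with C11 atomics standing in for the kernel types:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int v; } atomic_checked_t;    /* models hardened atomic_t */
typedef struct { atomic_int v; } atomic_unchecked_t;  /* wrapping is acceptable   */

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
        atomic_fetch_add(&a->v, 1);                    /* may wrap, and that is fine */
}

static int atomic_inc_checked(atomic_checked_t *a)
{
        /* Simplified (not race-free): REFCOUNT performs the equivalent check in asm. */
        if (atomic_load(&a->v) == INT_MAX)
                return -1;
        atomic_fetch_add(&a->v, 1);
        return 0;
}

int main(void)
{
        atomic_unchecked_t lost = { 0 };
        atomic_checked_t   refs = { INT_MAX };

        atomic_inc_unchecked(&lost);
        printf("saturated checked inc -> %d\n", atomic_inc_checked(&refs));
        return 0;
}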
fe2de317 36784diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
4c928ab7 36785index 2f0aa0f..d5246c3 100644
36786--- a/drivers/oprofile/oprofilefs.c
36787+++ b/drivers/oprofile/oprofilefs.c
36788@@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
36789 }
36790
36791
36792+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36793 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36794 {
36795 unsigned long value;
36796@@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = {
36797
36798
36799 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36800- char const *name, atomic_t *val)
36801+ char const *name, atomic_unchecked_t *val)
36802 {
36803 return __oprofilefs_create_file(sb, root, name,
36804 &atomic_ro_fops, 0444, val);
36805diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36806index 3f56bc0..707d642 100644
36807--- a/drivers/parport/procfs.c
36808+++ b/drivers/parport/procfs.c
36809@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36810
36811 *ppos += len;
36812
36813- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
bc901d79 36814+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36815 }
36816
36817 #ifdef CONFIG_PARPORT_1284
fe2de317 36818@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36819
36820 *ppos += len;
36821
36822- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
bc901d79 36823+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36824 }
36825 #endif /* IEEE1284.3 support. */
36826
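Both parport procfs hunks above put a length check in front of copy_to_user() so that len can never exceed the on-stack buffer being copied out. The shape of the check in a self-contained form, with copy_to_user() replaced by memcpy() purely so the example runs in user space:

#include <stdio.h>
#include <string.h>

/* Returns -1 (the kernel code returns -EFAULT) unless len fits the buffer. */
static int copy_out(char *dst, const char *buffer, size_t buf_size, size_t len)
{
        if (len > buf_size)
                return -1;
        memcpy(dst, buffer, len);   /* the kernel uses copy_to_user() here */
        return 0;
}

int main(void)
{
        char buffer[16] = "parport0\n";
        char out[64];

        printf("in-bounds : %d\n", copy_out(out, buffer, sizeof buffer, 9));
        printf("oversized : %d\n", copy_out(out, buffer, sizeof buffer, 4096));
        return 0;
}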
36827diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36828index 9fff878..ad0ad53 100644
36829--- a/drivers/pci/hotplug/cpci_hotplug.h
36830+++ b/drivers/pci/hotplug/cpci_hotplug.h
36831@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36832 int (*hardware_test) (struct slot* slot, u32 value);
36833 u8 (*get_power) (struct slot* slot);
36834 int (*set_power) (struct slot* slot, int value);
36835-};
36836+} __no_const;
36837
36838 struct cpci_hp_controller {
36839 unsigned int irq;
36840diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36841index 76ba8a1..20ca857 100644
36842--- a/drivers/pci/hotplug/cpqphp_nvram.c
36843+++ b/drivers/pci/hotplug/cpqphp_nvram.c
36844@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36845
36846 void compaq_nvram_init (void __iomem *rom_start)
36847 {
36848+
36849+#ifndef CONFIG_PAX_KERNEXEC
36850 if (rom_start) {
36851 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36852 }
36853+#endif
36854+
36855 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36856
36857 /* initialize our int15 lock */
fe2de317 36858diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
5e856224 36859index 2275162..95f1a92 100644
36860--- a/drivers/pci/pcie/aspm.c
36861+++ b/drivers/pci/pcie/aspm.c
36862@@ -27,9 +27,9 @@
36863 #define MODULE_PARAM_PREFIX "pcie_aspm."
36864
36865 /* Note: those are not register definitions */
36866-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36867-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36868-#define ASPM_STATE_L1 (4) /* L1 state */
36869+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36870+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36871+#define ASPM_STATE_L1 (4U) /* L1 state */
36872 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36873 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36874
fe2de317 36875diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
5e856224 36876index 71eac9c..2de27ef 100644
36877--- a/drivers/pci/probe.c
36878+++ b/drivers/pci/probe.c
36879@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36880 u32 l, sz, mask;
36881 u16 orig_cmd;
36882
36883- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36884+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36885
36886 if (!dev->mmio_always_on) {
36887 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36888diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36889index 27911b5..5b6db88 100644
36890--- a/drivers/pci/proc.c
36891+++ b/drivers/pci/proc.c
36892@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36893 static int __init pci_proc_init(void)
36894 {
36895 struct pci_dev *dev = NULL;
36896+
36897+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36898+#ifdef CONFIG_GRKERNSEC_PROC_USER
36899+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36900+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36901+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36902+#endif
36903+#else
36904 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36905+#endif
36906 proc_create("devices", 0, proc_bus_pci_dir,
36907 &proc_bus_pci_dev_operations);
36908 proc_initialized = 1;
4c928ab7 36909diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
5e856224 36910index 6f966d6..68e18ed 100644
36911--- a/drivers/platform/x86/asus_acpi.c
36912+++ b/drivers/platform/x86/asus_acpi.c
36913@@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
36914 }
36915
36916 static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36917+ size_t count, loff_t *pos) __size_overflow(3);
36918+static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36919 size_t count, loff_t *pos)
36920 {
36921 int rv, value;
fe2de317 36922diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
5e856224 36923index ea0c607..58c4628 100644
36924--- a/drivers/platform/x86/thinkpad_acpi.c
36925+++ b/drivers/platform/x86/thinkpad_acpi.c
15a11c5b 36926@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36927 return 0;
36928 }
36929
36930-void static hotkey_mask_warn_incomplete_mask(void)
36931+static void hotkey_mask_warn_incomplete_mask(void)
66a7e928 36932 {
36933 /* log only what the user can fix... */
36934 const u32 wantedmask = hotkey_driver_mask &
36935@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36936 }
36937 }
36938
36939-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36940- struct tp_nvram_state *newn,
36941- const u32 event_mask)
36942-{
36943-
36944 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36945 do { \
36946 if ((event_mask & (1 << __scancode)) && \
36947@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36948 tpacpi_hotkey_send_key(__scancode); \
36949 } while (0)
36950
36951- void issue_volchange(const unsigned int oldvol,
36952- const unsigned int newvol)
36953- {
36954- unsigned int i = oldvol;
36955+static void issue_volchange(const unsigned int oldvol,
36956+ const unsigned int newvol,
36957+ const u32 event_mask)
36958+{
36959+ unsigned int i = oldvol;
36960
36961- while (i > newvol) {
36962- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36963- i--;
36964- }
36965- while (i < newvol) {
36966- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36967- i++;
36968- }
36969+ while (i > newvol) {
36970+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36971+ i--;
36972 }
36973+ while (i < newvol) {
36974+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36975+ i++;
36976+ }
36977+}
36978
36979- void issue_brightnesschange(const unsigned int oldbrt,
36980- const unsigned int newbrt)
36981- {
36982- unsigned int i = oldbrt;
36983+static void issue_brightnesschange(const unsigned int oldbrt,
36984+ const unsigned int newbrt,
36985+ const u32 event_mask)
36986+{
36987+ unsigned int i = oldbrt;
36988
36989- while (i > newbrt) {
36990- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36991- i--;
36992- }
36993- while (i < newbrt) {
36994- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36995- i++;
36996- }
36997+ while (i > newbrt) {
36998+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36999+ i--;
37000+ }
37001+ while (i < newbrt) {
37002+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37003+ i++;
37004 }
37005+}
37006
37007+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37008+ struct tp_nvram_state *newn,
37009+ const u32 event_mask)
37010+{
37011 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
37012 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
37013 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
37014@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37015 oldn->volume_level != newn->volume_level) {
37016 /* recently muted, or repeated mute keypress, or
37017 * multiple presses ending in mute */
37018- issue_volchange(oldn->volume_level, newn->volume_level);
37019+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37020 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
37021 }
37022 } else {
37023@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37024 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37025 }
37026 if (oldn->volume_level != newn->volume_level) {
37027- issue_volchange(oldn->volume_level, newn->volume_level);
37028+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37029 } else if (oldn->volume_toggle != newn->volume_toggle) {
37030 /* repeated vol up/down keypress at end of scale ? */
37031 if (newn->volume_level == 0)
37032@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37033 /* handle brightness */
37034 if (oldn->brightness_level != newn->brightness_level) {
37035 issue_brightnesschange(oldn->brightness_level,
37036- newn->brightness_level);
37037+ newn->brightness_level,
37038+ event_mask);
37039 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37040 /* repeated key presses that didn't change state */
37041 if (newn->brightness_level == 0)
37042@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37043 && !tp_features.bright_unkfw)
37044 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37045 }
37046+}
37047
37048 #undef TPACPI_COMPARE_KEY
37049 #undef TPACPI_MAY_SEND_KEY
37050-}
37051
37052 /*
37053 * Polling driver
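The thinkpad_acpi rewrite above is bigger than the usual one-liners because issue_volchange() and issue_brightnesschange() were GCC nested functions: they captured event_mask from the enclosing hotkey_compare_and_issue_event(), which needs an executable-stack trampoline that PaX forbids. The patch hoists them to file scope and passes event_mask explicitly. In miniature, the same transformation looks like this (hypothetical names):

#include <stdio.h>

/* After: a file-scope helper taking what the nested version used to capture. */
static void issue_change(unsigned int oldval, unsigned int newval, unsigned int mask)
{
        if (mask && oldval != newval)
                printf("change %u -> %u\n", oldval, newval);
}

static void compare_and_issue(unsigned int oldval, unsigned int newval, unsigned int mask)
{
        /* Before, issue_change() lived here as a nested function and read mask
         * straight from this frame; taking its address would have required a
         * stack trampoline, which is exactly what the conversion avoids. */
        issue_change(oldval, newval, mask);
}

int main(void)
{
        compare_and_issue(3, 5, 1);
        return 0;
}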
37054diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
37055index dcdc1f4..85cee16 100644
37056--- a/drivers/platform/x86/toshiba_acpi.c
37057+++ b/drivers/platform/x86/toshiba_acpi.c
37058@@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd)
37059 }
37060
37061 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
37062+ size_t count, loff_t *pos) __size_overflow(3);
37063+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
37064 size_t count, loff_t *pos)
37065 {
37066 struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
fe2de317
MT
37067diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37068index b859d16..5cc6b1a 100644
37069--- a/drivers/pnp/pnpbios/bioscalls.c
37070+++ b/drivers/pnp/pnpbios/bioscalls.c
df50ba0c 37071@@ -59,7 +59,7 @@ do { \
ae4e228f 37072 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37073 } while(0)
37074
37075-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37076+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37077 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37078
37079 /*
fe2de317 37080@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37081
37082 cpu = get_cpu();
37083 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37084+
ae4e228f 37085+ pax_open_kernel();
58c5fc13 37086 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
ae4e228f 37087+ pax_close_kernel();
58c5fc13 37088
37089 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37090 spin_lock_irqsave(&pnp_bios_lock, flags);
fe2de317 37091@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37092 :"memory");
37093 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37094
ae4e228f 37095+ pax_open_kernel();
58c5fc13 37096 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
ae4e228f 37097+ pax_close_kernel();
37098+
37099 put_cpu();
37100
37101 /* If we get here and this is set then the PnP BIOS faulted on us. */
fe2de317 37102@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37103 return status;
37104 }
37105
37106-void pnpbios_calls_init(union pnp_bios_install_struct *header)
37107+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37108 {
37109 int i;
37110
fe2de317 37111@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37112 pnp_bios_callpoint.offset = header->fields.pm16offset;
37113 pnp_bios_callpoint.segment = PNP_CS16;
37114
ae4e228f 37115+ pax_open_kernel();
58c5fc13 37116+
37117 for_each_possible_cpu(i) {
37118 struct desc_struct *gdt = get_cpu_gdt_table(i);
37119 if (!gdt)
fe2de317 37120@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37121 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37122 (unsigned long)__va(header->fields.pm16dseg));
37123 }
37124+
ae4e228f 37125+ pax_close_kernel();
58c5fc13 37126 }
37127diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37128index b0ecacb..7c9da2e 100644
37129--- a/drivers/pnp/resource.c
37130+++ b/drivers/pnp/resource.c
37131@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37132 return 1;
37133
37134 /* check if the resource is valid */
37135- if (*irq < 0 || *irq > 15)
37136+ if (*irq > 15)
37137 return 0;
37138
37139 /* check if the resource is reserved */
fe2de317 37140@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37141 return 1;
37142
37143 /* check if the resource is valid */
37144- if (*dma < 0 || *dma == 4 || *dma > 7)
37145+ if (*dma == 4 || *dma > 7)
37146 return 0;
37147
37148 /* check if the resource is reserved */
fe2de317 37149diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
5e856224 37150index 1ed6ea0..77c0bd2 100644
37151--- a/drivers/power/bq27x00_battery.c
37152+++ b/drivers/power/bq27x00_battery.c
5e856224 37153@@ -72,7 +72,7 @@
37154 struct bq27x00_device_info;
37155 struct bq27x00_access_methods {
37156 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
37157-};
37158+} __no_const;
37159
37160 enum bq27x00_chip { BQ27000, BQ27500 };
37161
fe2de317 37162diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
5e856224 37163index a838e66..a9e1665 100644
37164--- a/drivers/regulator/max8660.c
37165+++ b/drivers/regulator/max8660.c
37166@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37167 max8660->shadow_regs[MAX8660_OVER1] = 5;
37168 } else {
37169 /* Otherwise devices can be toggled via software */
37170- max8660_dcdc_ops.enable = max8660_dcdc_enable;
37171- max8660_dcdc_ops.disable = max8660_dcdc_disable;
37172+ pax_open_kernel();
37173+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37174+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37175+ pax_close_kernel();
37176 }
66a7e928 37177
15a11c5b 37178 /*
fe2de317 37179diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
5e856224 37180index e8cfc99..072aee2 100644
37181--- a/drivers/regulator/mc13892-regulator.c
37182+++ b/drivers/regulator/mc13892-regulator.c
5e856224 37183@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37184 }
37185 mc13xxx_unlock(mc13892);
66a7e928 37186
37187- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37188+ pax_open_kernel();
37189+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37190 = mc13892_vcam_set_mode;
37191- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37192+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37193 = mc13892_vcam_get_mode;
37194+ pax_close_kernel();
37195
37196 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
37197 ARRAY_SIZE(mc13892_regulators));
37198diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
37199index cace6d3..f623fda 100644
37200--- a/drivers/rtc/rtc-dev.c
37201+++ b/drivers/rtc/rtc-dev.c
37202@@ -14,6 +14,7 @@
37203 #include <linux/module.h>
37204 #include <linux/rtc.h>
37205 #include <linux/sched.h>
37206+#include <linux/grsecurity.h>
37207 #include "rtc-core.h"
37208
37209 static dev_t rtc_devt;
fe2de317 37210@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
37211 if (copy_from_user(&tm, uarg, sizeof(tm)))
37212 return -EFAULT;
37213
37214+ gr_log_timechange();
37215+
37216 return rtc_set_time(rtc, &tm);
37217
37218 case RTC_PIE_ON:
37219diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
37220index ffb5878..e6d785c 100644
37221--- a/drivers/scsi/aacraid/aacraid.h
37222+++ b/drivers/scsi/aacraid/aacraid.h
37223@@ -492,7 +492,7 @@ struct adapter_ops
37224 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
37225 /* Administrative operations */
37226 int (*adapter_comm)(struct aac_dev * dev, int comm);
37227-};
37228+} __no_const;
37229
37230 /*
15a11c5b 37231 * Define which interrupt handler needs to be installed
fe2de317 37232diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
4c928ab7 37233index 705e13e..91c873c 100644
37234--- a/drivers/scsi/aacraid/linit.c
37235+++ b/drivers/scsi/aacraid/linit.c
37236@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
37237 #elif defined(__devinitconst)
37238 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
37239 #else
37240-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
37241+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
37242 #endif
37243 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
37244 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
37245diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
37246index d5ff142..49c0ebb 100644
37247--- a/drivers/scsi/aic94xx/aic94xx_init.c
37248+++ b/drivers/scsi/aic94xx/aic94xx_init.c
37249@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
37250 .lldd_control_phy = asd_control_phy,
37251 };
37252
37253-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
37254+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
37255 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
37256 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
37257 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
37258diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
37259index a796de9..1ef20e1 100644
37260--- a/drivers/scsi/bfa/bfa.h
37261+++ b/drivers/scsi/bfa/bfa.h
37262@@ -196,7 +196,7 @@ struct bfa_hwif_s {
37263 u32 *end);
37264 int cpe_vec_q0;
37265 int rme_vec_q0;
37266-};
37267+} __no_const;
37268 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
66a7e928 37269
37270 struct bfa_faa_cbfn_s {
37271diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
5e856224 37272index f0f80e2..8ec946b 100644
37273--- a/drivers/scsi/bfa/bfa_fcpim.c
37274+++ b/drivers/scsi/bfa/bfa_fcpim.c
5e856224 37275@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
37276
37277 bfa_iotag_attach(fcp);
37278
37279- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
37280+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
37281 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
37282 (fcp->num_itns * sizeof(struct bfa_itn_s));
37283 memset(fcp->itn_arr, 0,
5e856224 37284@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37285 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
37286 {
37287 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
37288- struct bfa_itn_s *itn;
37289+ bfa_itn_s_no_const *itn;
37290
37291 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
37292 itn->isr = isr;
fe2de317 37293diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
5e856224 37294index 36f26da..38a34a8 100644
37295--- a/drivers/scsi/bfa/bfa_fcpim.h
37296+++ b/drivers/scsi/bfa/bfa_fcpim.h
37297@@ -37,6 +37,7 @@ struct bfa_iotag_s {
37298 struct bfa_itn_s {
37299 bfa_isr_func_t isr;
37300 };
37301+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
37302
37303 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37304 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
5e856224 37305@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
37306 struct list_head iotag_tio_free_q; /* free IO resources */
37307 struct list_head iotag_unused_q; /* unused IO resources*/
37308 struct bfa_iotag_s *iotag_arr;
37309- struct bfa_itn_s *itn_arr;
37310+ bfa_itn_s_no_const *itn_arr;
37311 int num_ioim_reqs;
37312 int num_fwtio_reqs;
37313 int num_itns;
37314diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
37315index 546d46b..642fa5b 100644
37316--- a/drivers/scsi/bfa/bfa_ioc.h
37317+++ b/drivers/scsi/bfa/bfa_ioc.h
6e9df6a3 37318@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
37319 bfa_ioc_disable_cbfn_t disable_cbfn;
37320 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37321 bfa_ioc_reset_cbfn_t reset_cbfn;
37322-};
37323+} __no_const;
8308f9c9 37324
15a11c5b 37325 /*
37326 * IOC event notification mechanism.
37327@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
37328 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
37329 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
6e9df6a3 37330 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
15a11c5b
MT
37331-};
37332+} __no_const;
37333
37334 /*
37335 * Queue element to wait for room in request queue. FIFO order is
37336diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
37337index 351dc0b..951dc32 100644
37338--- a/drivers/scsi/hosts.c
37339+++ b/drivers/scsi/hosts.c
37340@@ -42,7 +42,7 @@
37341 #include "scsi_logging.h"
37342
37343
37344-static atomic_t scsi_host_next_hn; /* host_no for next new host */
37345+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
37346
37347
37348 static void scsi_host_cls_release(struct device *dev)
fe2de317 37349@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
37350 * subtract one because we increment first then return, but we need to
37351 * know what the next host number was before increment
37352 */
37353- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37354+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37355 shost->dma_channel = 0xff;
37356
37357 /* These three are default values which can be overridden */
fe2de317 37358diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
5e856224 37359index b96962c..0c82ec2 100644
37360--- a/drivers/scsi/hpsa.c
37361+++ b/drivers/scsi/hpsa.c
5e856224 37362@@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
37363 u32 a;
37364
37365 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37366- return h->access.command_completed(h);
37367+ return h->access->command_completed(h);
37368
37369 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37370 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
5e856224 37371@@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
37372 while (!list_empty(&h->reqQ)) {
37373 c = list_entry(h->reqQ.next, struct CommandList, list);
37374 /* can't do anything if fifo is full */
37375- if ((h->access.fifo_full(h))) {
37376+ if ((h->access->fifo_full(h))) {
37377 dev_warn(&h->pdev->dev, "fifo full\n");
37378 break;
37379 }
5e856224 37380@@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
15a11c5b 37381 h->Qdepth--;
66a7e928 37382
37383 /* Tell the controller execute command */
37384- h->access.submit_command(h, c);
37385+ h->access->submit_command(h, c);
66a7e928 37386
37387 /* Put job onto the completed Q */
37388 addQ(&h->cmpQ, c);
5e856224 37389@@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
66a7e928 37390
37391 static inline unsigned long get_next_completion(struct ctlr_info *h)
37392 {
37393- return h->access.command_completed(h);
37394+ return h->access->command_completed(h);
37395 }
66a7e928 37396
37397 static inline bool interrupt_pending(struct ctlr_info *h)
37398 {
37399- return h->access.intr_pending(h);
37400+ return h->access->intr_pending(h);
37401 }
66a7e928 37402
37403 static inline long interrupt_not_for_us(struct ctlr_info *h)
37404 {
37405- return (h->access.intr_pending(h) == 0) ||
37406+ return (h->access->intr_pending(h) == 0) ||
37407 (h->interrupts_enabled == 0);
37408 }
66a7e928 37409
5e856224 37410@@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
37411 if (prod_index < 0)
37412 return -ENODEV;
37413 h->product_name = products[prod_index].product_name;
37414- h->access = *(products[prod_index].access);
37415+ h->access = products[prod_index].access;
66a7e928 37416
37417 if (hpsa_board_disabled(h->pdev)) {
37418 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
5e856224 37419@@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
37420
37421 assert_spin_locked(&lockup_detector_lock);
37422 remove_ctlr_from_lockup_detector_list(h);
37423- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37424+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37425 spin_lock_irqsave(&h->lock, flags);
37426 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
37427 spin_unlock_irqrestore(&h->lock, flags);
5e856224 37428@@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
66a7e928 37429 }
66a7e928 37430
37431 /* make sure the board interrupts are off */
37432- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37433+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
66a7e928 37434
37435 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
37436 goto clean2;
5e856224 37437@@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
37438 * fake ones to scoop up any residual completions.
37439 */
37440 spin_lock_irqsave(&h->lock, flags);
37441- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37442+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37443 spin_unlock_irqrestore(&h->lock, flags);
37444 free_irq(h->intr[h->intr_mode], h);
37445 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
5e856224 37446@@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
37447 dev_info(&h->pdev->dev, "Board READY.\n");
37448 dev_info(&h->pdev->dev,
37449 "Waiting for stale completions to drain.\n");
37450- h->access.set_intr_mask(h, HPSA_INTR_ON);
37451+ h->access->set_intr_mask(h, HPSA_INTR_ON);
37452 msleep(10000);
37453- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37454+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37455
37456 rc = controller_reset_failed(h->cfgtable);
37457 if (rc)
5e856224 37458@@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
37459 }
37460
37461 /* Turn the interrupts on so we can service requests */
37462- h->access.set_intr_mask(h, HPSA_INTR_ON);
37463+ h->access->set_intr_mask(h, HPSA_INTR_ON);
37464
37465 hpsa_hba_inquiry(h);
37466 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
5e856224 37467@@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
37468 * To write all data in the battery backed cache to disks
37469 */
37470 hpsa_flush_cache(h);
37471- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37472+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37473 free_irq(h->intr[h->intr_mode], h);
37474 #ifdef CONFIG_PCI_MSI
37475 if (h->msix_vector)
5e856224 37476@@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
37477 return;
37478 }
37479 /* Change the access methods to the performant access methods */
37480- h->access = SA5_performant_access;
37481+ h->access = &SA5_performant_access;
37482 h->transMethod = CFGTBL_Trans_Performant;
37483 }
37484
fe2de317 37485diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
4c928ab7 37486index 91edafb..a9b88ec 100644
37487--- a/drivers/scsi/hpsa.h
37488+++ b/drivers/scsi/hpsa.h
37489@@ -73,7 +73,7 @@ struct ctlr_info {
37490 unsigned int msix_vector;
37491 unsigned int msi_vector;
37492 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
37493- struct access_method access;
37494+ struct access_method *access;
37495
37496 /* queue and queue Info */
37497 struct list_head reqQ;
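The hpsa changes above turn the embedded struct access_method into a pointer: instead of copying the chosen method table into every ctlr_info (which would then have to stay writable), the controller just points at one of the shared tables, and every h->access. use becomes h->access->. A compact sketch of that before/after, with hypothetical names standing in for the driver's tables:

#include <stdio.h>

struct access_method { void (*submit)(const char *what); };

static void simple_submit(const char *what) { printf("simple: %s\n", what); }
static void perf_submit(const char *what)   { printf("perf:   %s\n", what); }

/* Shared, read-only method tables (stand-ins for SA5_access and friends). */
static const struct access_method simple_access = { simple_submit };
static const struct access_method perf_access   = { perf_submit };

struct ctlr_info {
        const struct access_method *access;   /* pointer, not an embedded copy */
};

int main(void)
{
        struct ctlr_info h = { &simple_access };

        h.access->submit("command");
        h.access = &perf_access;              /* switch tables without copying */
        h.access->submit("command");
        return 0;
}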
37498diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
37499index f2df059..a3a9930 100644
37500--- a/drivers/scsi/ips.h
37501+++ b/drivers/scsi/ips.h
37502@@ -1027,7 +1027,7 @@ typedef struct {
37503 int (*intr)(struct ips_ha *);
37504 void (*enableint)(struct ips_ha *);
37505 uint32_t (*statupd)(struct ips_ha *);
37506-} ips_hw_func_t;
37507+} __no_const ips_hw_func_t;
37508
37509 typedef struct ips_ha {
37510 uint8_t ha_id[IPS_MAX_CHANNELS+1];
fe2de317 37511diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
5e856224 37512index 4d70d96..84d0573 100644
37513--- a/drivers/scsi/libfc/fc_exch.c
37514+++ b/drivers/scsi/libfc/fc_exch.c
16454cff 37515@@ -105,12 +105,12 @@ struct fc_exch_mgr {
37516 * all together if not used XXX
37517 */
37518 struct {
37519- atomic_t no_free_exch;
37520- atomic_t no_free_exch_xid;
37521- atomic_t xid_not_found;
37522- atomic_t xid_busy;
37523- atomic_t seq_not_found;
37524- atomic_t non_bls_resp;
37525+ atomic_unchecked_t no_free_exch;
37526+ atomic_unchecked_t no_free_exch_xid;
37527+ atomic_unchecked_t xid_not_found;
37528+ atomic_unchecked_t xid_busy;
37529+ atomic_unchecked_t seq_not_found;
37530+ atomic_unchecked_t non_bls_resp;
37531 } stats;
58c5fc13 37532 };
16454cff 37533
4c928ab7 37534@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
37535 /* allocate memory for exchange */
37536 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37537 if (!ep) {
37538- atomic_inc(&mp->stats.no_free_exch);
37539+ atomic_inc_unchecked(&mp->stats.no_free_exch);
37540 goto out;
37541 }
37542 memset(ep, 0, sizeof(*ep));
4c928ab7 37543@@ -780,7 +780,7 @@ out:
37544 return ep;
37545 err:
ae4e228f 37546 spin_unlock_bh(&pool->lock);
37547- atomic_inc(&mp->stats.no_free_exch_xid);
37548+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37549 mempool_free(ep, mp->ep_pool);
37550 return NULL;
37551 }
4c928ab7 37552@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37553 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37554 ep = fc_exch_find(mp, xid);
37555 if (!ep) {
37556- atomic_inc(&mp->stats.xid_not_found);
37557+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37558 reject = FC_RJT_OX_ID;
37559 goto out;
37560 }
4c928ab7 37561@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37562 ep = fc_exch_find(mp, xid);
37563 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37564 if (ep) {
37565- atomic_inc(&mp->stats.xid_busy);
37566+ atomic_inc_unchecked(&mp->stats.xid_busy);
37567 reject = FC_RJT_RX_ID;
37568 goto rel;
37569 }
4c928ab7 37570@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37571 }
37572 xid = ep->xid; /* get our XID */
37573 } else if (!ep) {
37574- atomic_inc(&mp->stats.xid_not_found);
37575+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37576 reject = FC_RJT_RX_ID; /* XID not found */
37577 goto out;
37578 }
4c928ab7 37579@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37580 } else {
37581 sp = &ep->seq;
37582 if (sp->id != fh->fh_seq_id) {
37583- atomic_inc(&mp->stats.seq_not_found);
37584+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37585 if (f_ctl & FC_FC_END_SEQ) {
37586 /*
37587 * Update sequence_id based on incoming last
4c928ab7 37588@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37589
37590 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37591 if (!ep) {
37592- atomic_inc(&mp->stats.xid_not_found);
37593+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37594 goto out;
37595 }
37596 if (ep->esb_stat & ESB_ST_COMPLETE) {
37597- atomic_inc(&mp->stats.xid_not_found);
37598+ atomic_inc_unchecked(&mp->stats.xid_not_found);
16454cff 37599 goto rel;
37600 }
37601 if (ep->rxid == FC_XID_UNKNOWN)
37602 ep->rxid = ntohs(fh->fh_rx_id);
37603 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37604- atomic_inc(&mp->stats.xid_not_found);
37605+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37606 goto rel;
37607 }
37608 if (ep->did != ntoh24(fh->fh_s_id) &&
37609 ep->did != FC_FID_FLOGI) {
37610- atomic_inc(&mp->stats.xid_not_found);
37611+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37612 goto rel;
37613 }
37614 sof = fr_sof(fp);
4c928ab7 37615@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37616 sp->ssb_stat |= SSB_ST_RESP;
37617 sp->id = fh->fh_seq_id;
37618 } else if (sp->id != fh->fh_seq_id) {
37619- atomic_inc(&mp->stats.seq_not_found);
37620+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37621 goto rel;
58c5fc13 37622 }
57199397 37623
4c928ab7 37624@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
58c5fc13 37625 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37626
37627 if (!sp)
37628- atomic_inc(&mp->stats.xid_not_found);
37629+ atomic_inc_unchecked(&mp->stats.xid_not_found);
ae4e228f 37630 else
37631- atomic_inc(&mp->stats.non_bls_resp);
37632+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
ae4e228f 37633
58c5fc13 37634 fc_frame_free(fp);
ae4e228f 37635 }
37636diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
37637index db9238f..4378ed2 100644
37638--- a/drivers/scsi/libsas/sas_ata.c
37639+++ b/drivers/scsi/libsas/sas_ata.c
37640@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
37641 .postreset = ata_std_postreset,
37642 .error_handler = ata_std_error_handler,
ae4e228f 37643 .post_internal_cmd = sas_ata_post_internal,
37644- .qc_defer = ata_std_qc_defer,
37645+ .qc_defer = ata_std_qc_defer,
37646 .qc_prep = ata_noop_qc_prep,
37647 .qc_issue = sas_ata_qc_issue,
37648 .qc_fill_rtf = sas_ata_qc_fill_rtf,
fe2de317 37649diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
5e856224 37650index 825f930..ce42672 100644
37651--- a/drivers/scsi/lpfc/lpfc.h
37652+++ b/drivers/scsi/lpfc/lpfc.h
5e856224 37653@@ -413,7 +413,7 @@ struct lpfc_vport {
37654 struct dentry *debug_nodelist;
37655 struct dentry *vport_debugfs_root;
37656 struct lpfc_debugfs_trc *disc_trc;
37657- atomic_t disc_trc_cnt;
37658+ atomic_unchecked_t disc_trc_cnt;
37659 #endif
37660 uint8_t stat_data_enabled;
37661 uint8_t stat_data_blocked;
5e856224 37662@@ -821,8 +821,8 @@ struct lpfc_hba {
37663 struct timer_list fabric_block_timer;
37664 unsigned long bit_flags;
37665 #define FABRIC_COMANDS_BLOCKED 0
37666- atomic_t num_rsrc_err;
37667- atomic_t num_cmd_success;
37668+ atomic_unchecked_t num_rsrc_err;
37669+ atomic_unchecked_t num_cmd_success;
37670 unsigned long last_rsrc_error_time;
37671 unsigned long last_ramp_down_time;
37672 unsigned long last_ramp_up_time;
5e856224 37673@@ -852,7 +852,7 @@ struct lpfc_hba {
4c928ab7 37674
fe2de317
MT
37675 struct dentry *debug_slow_ring_trc;
37676 struct lpfc_debugfs_trc *slow_ring_trc;
37677- atomic_t slow_ring_trc_cnt;
37678+ atomic_unchecked_t slow_ring_trc_cnt;
37679 /* iDiag debugfs sub-directory */
37680 struct dentry *idiag_root;
37681 struct dentry *idiag_pci_cfg;
37682diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
5e856224 37683index 3587a3f..d45b81b 100644
37684--- a/drivers/scsi/lpfc/lpfc_debugfs.c
37685+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
4c928ab7 37686@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
37687
37688 #include <linux/debugfs.h>
37689
37690-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37691+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37692 static unsigned long lpfc_debugfs_start_time = 0L;
37693
66a7e928 37694 /* iDiag */
4c928ab7 37695@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
37696 lpfc_debugfs_enable = 0;
37697
37698 len = 0;
37699- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37700+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37701 (lpfc_debugfs_max_disc_trc - 1);
37702 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37703 dtp = vport->disc_trc + i;
4c928ab7 37704@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
8308f9c9
MT
37705 lpfc_debugfs_enable = 0;
37706
37707 len = 0;
37708- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37709+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37710 (lpfc_debugfs_max_slow_ring_trc - 1);
37711 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37712 dtp = phba->slow_ring_trc + i;
4c928ab7 37713@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
8308f9c9
MT
37714 !vport || !vport->disc_trc)
37715 return;
37716
37717- index = atomic_inc_return(&vport->disc_trc_cnt) &
37718+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37719 (lpfc_debugfs_max_disc_trc - 1);
37720 dtp = vport->disc_trc + index;
37721 dtp->fmt = fmt;
37722 dtp->data1 = data1;
37723 dtp->data2 = data2;
37724 dtp->data3 = data3;
37725- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37726+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37727 dtp->jif = jiffies;
37728 #endif
37729 return;
4c928ab7 37730@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
8308f9c9
MT
37731 !phba || !phba->slow_ring_trc)
37732 return;
37733
37734- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37735+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37736 (lpfc_debugfs_max_slow_ring_trc - 1);
37737 dtp = phba->slow_ring_trc + index;
37738 dtp->fmt = fmt;
37739 dtp->data1 = data1;
37740 dtp->data2 = data2;
37741 dtp->data3 = data3;
37742- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37743+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37744 dtp->jif = jiffies;
37745 #endif
37746 return;
5e856224 37747@@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
37748 "slow_ring buffer\n");
37749 goto debug_failed;
37750 }
37751- atomic_set(&phba->slow_ring_trc_cnt, 0);
37752+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37753 memset(phba->slow_ring_trc, 0,
37754 (sizeof(struct lpfc_debugfs_trc) *
37755 lpfc_debugfs_max_slow_ring_trc));
5e856224 37756@@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
37757 "buffer\n");
37758 goto debug_failed;
37759 }
37760- atomic_set(&vport->disc_trc_cnt, 0);
37761+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37762
37763 snprintf(name, sizeof(name), "discovery_trace");
37764 vport->debug_disc_trc =
fe2de317 37765diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5e856224 37766index dfea2da..8e17227 100644
fe2de317
MT
37767--- a/drivers/scsi/lpfc/lpfc_init.c
37768+++ b/drivers/scsi/lpfc/lpfc_init.c
5e856224 37769@@ -10145,8 +10145,10 @@ lpfc_init(void)
15a11c5b
MT
37770 printk(LPFC_COPYRIGHT "\n");
37771
37772 if (lpfc_enable_npiv) {
37773- lpfc_transport_functions.vport_create = lpfc_vport_create;
37774- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37775+ pax_open_kernel();
37776+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37777+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37778+ pax_close_kernel();
37779 }
37780 lpfc_transport_template =
37781 fc_attach_transport(&lpfc_transport_functions);
fe2de317 37782diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
5e856224 37783index c60f5d0..751535c 100644
fe2de317
MT
37784--- a/drivers/scsi/lpfc/lpfc_scsi.c
37785+++ b/drivers/scsi/lpfc/lpfc_scsi.c
4c928ab7 37786@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
8308f9c9
MT
37787 uint32_t evt_posted;
37788
37789 spin_lock_irqsave(&phba->hbalock, flags);
37790- atomic_inc(&phba->num_rsrc_err);
37791+ atomic_inc_unchecked(&phba->num_rsrc_err);
37792 phba->last_rsrc_error_time = jiffies;
37793
37794 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
4c928ab7 37795@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
8308f9c9
MT
37796 unsigned long flags;
37797 struct lpfc_hba *phba = vport->phba;
37798 uint32_t evt_posted;
37799- atomic_inc(&phba->num_cmd_success);
37800+ atomic_inc_unchecked(&phba->num_cmd_success);
37801
37802 if (vport->cfg_lun_queue_depth <= queue_depth)
37803 return;
4c928ab7 37804@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
37805 unsigned long num_rsrc_err, num_cmd_success;
37806 int i;
37807
37808- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37809- num_cmd_success = atomic_read(&phba->num_cmd_success);
37810+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37811+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37812
37813 vports = lpfc_create_vport_work_array(phba);
37814 if (vports != NULL)
4c928ab7 37815@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
37816 }
37817 }
37818 lpfc_destroy_vport_work_array(phba, vports);
37819- atomic_set(&phba->num_rsrc_err, 0);
37820- atomic_set(&phba->num_cmd_success, 0);
37821+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37822+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37823 }
37824
37825 /**
4c928ab7 37826@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
37827 }
37828 }
37829 lpfc_destroy_vport_work_array(phba, vports);
37830- atomic_set(&phba->num_rsrc_err, 0);
37831- atomic_set(&phba->num_cmd_success, 0);
37832+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37833+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37834 }
37835
37836 /**
fe2de317 37837diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
5e856224 37838index ea8a0b4..812a124 100644
fe2de317
MT
37839--- a/drivers/scsi/pmcraid.c
37840+++ b/drivers/scsi/pmcraid.c
4c928ab7 37841@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
8308f9c9
MT
37842 res->scsi_dev = scsi_dev;
37843 scsi_dev->hostdata = res;
37844 res->change_detected = 0;
37845- atomic_set(&res->read_failures, 0);
37846- atomic_set(&res->write_failures, 0);
37847+ atomic_set_unchecked(&res->read_failures, 0);
37848+ atomic_set_unchecked(&res->write_failures, 0);
37849 rc = 0;
37850 }
37851 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4c928ab7 37852@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
8308f9c9
MT
37853
37854 /* If this was a SCSI read/write command keep count of errors */
37855 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37856- atomic_inc(&res->read_failures);
37857+ atomic_inc_unchecked(&res->read_failures);
37858 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37859- atomic_inc(&res->write_failures);
37860+ atomic_inc_unchecked(&res->write_failures);
37861
37862 if (!RES_IS_GSCSI(res->cfg_entry) &&
37863 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
4c928ab7 37864@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
8308f9c9
MT
37865 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37866 * hrrq_id assigned here in queuecommand
37867 */
37868- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37869+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37870 pinstance->num_hrrq;
37871 cmd->cmd_done = pmcraid_io_done;
37872
4c928ab7 37873@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
8308f9c9
MT
37874 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37875 * hrrq_id assigned here in queuecommand
37876 */
37877- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37878+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37879 pinstance->num_hrrq;
37880
37881 if (request_size) {
4c928ab7 37882@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
8308f9c9
MT
37883
37884 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37885 /* add resources only after host is added into system */
37886- if (!atomic_read(&pinstance->expose_resources))
37887+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37888 return;
37889
37890 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
4c928ab7 37891@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
8308f9c9
MT
37892 init_waitqueue_head(&pinstance->reset_wait_q);
37893
37894 atomic_set(&pinstance->outstanding_cmds, 0);
37895- atomic_set(&pinstance->last_message_id, 0);
37896- atomic_set(&pinstance->expose_resources, 0);
37897+ atomic_set_unchecked(&pinstance->last_message_id, 0);
37898+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37899
37900 INIT_LIST_HEAD(&pinstance->free_res_q);
37901 INIT_LIST_HEAD(&pinstance->used_res_q);
4c928ab7 37902@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
8308f9c9
MT
37903 /* Schedule worker thread to handle CCN and take care of adding and
37904 * removing devices to OS
37905 */
37906- atomic_set(&pinstance->expose_resources, 1);
37907+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37908 schedule_work(&pinstance->worker_q);
37909 return rc;
37910
fe2de317 37911diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
4c928ab7 37912index ca496c7..9c791d5 100644
fe2de317
MT
37913--- a/drivers/scsi/pmcraid.h
37914+++ b/drivers/scsi/pmcraid.h
4c928ab7 37915@@ -748,7 +748,7 @@ struct pmcraid_instance {
8308f9c9
MT
37916 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37917
37918 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37919- atomic_t last_message_id;
37920+ atomic_unchecked_t last_message_id;
37921
37922 /* configuration table */
37923 struct pmcraid_config_table *cfg_table;
4c928ab7 37924@@ -777,7 +777,7 @@ struct pmcraid_instance {
8308f9c9
MT
37925 atomic_t outstanding_cmds;
37926
37927 /* should add/delete resources to mid-layer now ?*/
37928- atomic_t expose_resources;
37929+ atomic_unchecked_t expose_resources;
37930
37931
37932
4c928ab7 37933@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
8308f9c9
MT
37934 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37935 };
37936 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37937- atomic_t read_failures; /* count of failed READ commands */
37938- atomic_t write_failures; /* count of failed WRITE commands */
37939+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37940+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37941
37942 /* To indicate add/delete/modify during CCN */
37943 u8 change_detected;
fe2de317 37944diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
5e856224 37945index af1003f..be55a75 100644
fe2de317
MT
37946--- a/drivers/scsi/qla2xxx/qla_def.h
37947+++ b/drivers/scsi/qla2xxx/qla_def.h
5e856224 37948@@ -2247,7 +2247,7 @@ struct isp_operations {
15a11c5b
MT
37949 int (*start_scsi) (srb_t *);
37950 int (*abort_isp) (struct scsi_qla_host *);
5e856224 37951 int (*iospace_config)(struct qla_hw_data*);
15a11c5b
MT
37952-};
37953+} __no_const;
37954
37955 /* MSI-X Support *************************************************************/
37956
fe2de317 37957diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
5e856224 37958index bfe6854..ceac088 100644
fe2de317
MT
37959--- a/drivers/scsi/qla4xxx/ql4_def.h
37960+++ b/drivers/scsi/qla4xxx/ql4_def.h
5e856224 37961@@ -261,7 +261,7 @@ struct ddb_entry {
4c928ab7
MT
37962 * (4000 only) */
37963 atomic_t relogin_timer; /* Max Time to wait for
37964 * relogin to complete */
37965- atomic_t relogin_retry_count; /* Num of times relogin has been
37966+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37967 * retried */
37968 uint32_t default_time2wait; /* Default Min time between
37969 * relogins (+aens) */
37970diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
5e856224 37971index ce6d3b7..73fac54 100644
4c928ab7
MT
37972--- a/drivers/scsi/qla4xxx/ql4_os.c
37973+++ b/drivers/scsi/qla4xxx/ql4_os.c
5e856224 37974@@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4c928ab7
MT
37975 */
37976 if (!iscsi_is_session_online(cls_sess)) {
37977 /* Reset retry relogin timer */
37978- atomic_inc(&ddb_entry->relogin_retry_count);
37979+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37980 DEBUG2(ql4_printk(KERN_INFO, ha,
37981 "%s: index[%d] relogin timed out-retrying"
37982 " relogin (%d), retry (%d)\n", __func__,
37983 ddb_entry->fw_ddb_index,
37984- atomic_read(&ddb_entry->relogin_retry_count),
37985+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37986 ddb_entry->default_time2wait + 4));
37987 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37988 atomic_set(&ddb_entry->retry_relogin_timer,
5e856224 37989@@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4c928ab7 37990
8308f9c9
MT
37991 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37992 atomic_set(&ddb_entry->relogin_timer, 0);
37993- atomic_set(&ddb_entry->relogin_retry_count, 0);
37994+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
5e856224 37995 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
4c928ab7 37996 ddb_entry->default_relogin_timeout =
5e856224 37997 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
fe2de317
MT
37998diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37999index 2aeb2e9..46e3925 100644
38000--- a/drivers/scsi/scsi.c
38001+++ b/drivers/scsi/scsi.c
38002@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
8308f9c9
MT
38003 unsigned long timeout;
38004 int rtn = 0;
38005
38006- atomic_inc(&cmd->device->iorequest_cnt);
38007+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38008
38009 /* check if the device is still usable */
38010 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
fe2de317 38011diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5e856224 38012index b2c95db..227d74e 100644
fe2de317
MT
38013--- a/drivers/scsi/scsi_lib.c
38014+++ b/drivers/scsi/scsi_lib.c
5e856224 38015@@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
8308f9c9
MT
38016 shost = sdev->host;
38017 scsi_init_cmd_errh(cmd);
38018 cmd->result = DID_NO_CONNECT << 16;
38019- atomic_inc(&cmd->device->iorequest_cnt);
38020+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38021
38022 /*
38023 * SCSI request completion path will do scsi_device_unbusy(),
5e856224 38024@@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
8308f9c9
MT
38025
38026 INIT_LIST_HEAD(&cmd->eh_entry);
38027
38028- atomic_inc(&cmd->device->iodone_cnt);
38029+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
38030 if (cmd->result)
38031- atomic_inc(&cmd->device->ioerr_cnt);
38032+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38033
38034 disposition = scsi_decide_disposition(cmd);
38035 if (disposition != SUCCESS &&
fe2de317 38036diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
4c928ab7 38037index 04c2a27..9d8bd66 100644
fe2de317
MT
38038--- a/drivers/scsi/scsi_sysfs.c
38039+++ b/drivers/scsi/scsi_sysfs.c
4c928ab7 38040@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
8308f9c9
MT
38041 char *buf) \
38042 { \
38043 struct scsi_device *sdev = to_scsi_device(dev); \
38044- unsigned long long count = atomic_read(&sdev->field); \
38045+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
38046 return snprintf(buf, 20, "0x%llx\n", count); \
38047 } \
38048 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
fe2de317
MT
38049diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38050index 84a1fdf..693b0d6 100644
38051--- a/drivers/scsi/scsi_tgt_lib.c
38052+++ b/drivers/scsi/scsi_tgt_lib.c
38053@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
6e9df6a3
MT
38054 int err;
38055
38056 dprintk("%lx %u\n", uaddr, len);
38057- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38058+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38059 if (err) {
38060 /*
38061 * TODO: need to fixup sg_tablesize, max_segment_size,
fe2de317 38062diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
5e856224 38063index f59d4a0..1d89407 100644
fe2de317
MT
38064--- a/drivers/scsi/scsi_transport_fc.c
38065+++ b/drivers/scsi/scsi_transport_fc.c
38066@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
8308f9c9
MT
38067 * Netlink Infrastructure
38068 */
38069
38070-static atomic_t fc_event_seq;
38071+static atomic_unchecked_t fc_event_seq;
38072
38073 /**
38074 * fc_get_event_number - Obtain the next sequential FC event number
15a11c5b 38075@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
8308f9c9
MT
38076 u32
38077 fc_get_event_number(void)
38078 {
38079- return atomic_add_return(1, &fc_event_seq);
38080+ return atomic_add_return_unchecked(1, &fc_event_seq);
38081 }
38082 EXPORT_SYMBOL(fc_get_event_number);
38083
fe2de317 38084@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
8308f9c9
MT
38085 {
38086 int error;
38087
38088- atomic_set(&fc_event_seq, 0);
38089+ atomic_set_unchecked(&fc_event_seq, 0);
38090
38091 error = transport_class_register(&fc_host_class);
38092 if (error)
fe2de317 38093@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
71d190be 38094 char *cp;
58c5fc13 38095
71d190be
MT
38096 *val = simple_strtoul(buf, &cp, 0);
38097- if ((*cp && (*cp != '\n')) || (*val < 0))
38098+ if (*cp && (*cp != '\n'))
38099 return -EINVAL;
38100 /*
38101 * Check for overflow; dev_loss_tmo is u32
fe2de317 38102diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
5e856224 38103index e3e3c7d..ebdab62 100644
fe2de317
MT
38104--- a/drivers/scsi/scsi_transport_iscsi.c
38105+++ b/drivers/scsi/scsi_transport_iscsi.c
4c928ab7
MT
38106@@ -79,7 +79,7 @@ struct iscsi_internal {
38107 struct transport_container session_cont;
8308f9c9
MT
38108 };
38109
38110-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38111+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38112 static struct workqueue_struct *iscsi_eh_timer_workq;
38113
4c928ab7 38114 static DEFINE_IDA(iscsi_sess_ida);
5e856224 38115@@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
8308f9c9
MT
38116 int err;
38117
38118 ihost = shost->shost_data;
38119- session->sid = atomic_add_return(1, &iscsi_session_nr);
38120+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38121
4c928ab7
MT
38122 if (target_id == ISCSI_MAX_TARGET) {
38123 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
5e856224 38124@@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
8308f9c9
MT
38125 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38126 ISCSI_TRANSPORT_VERSION);
38127
38128- atomic_set(&iscsi_session_nr, 0);
38129+ atomic_set_unchecked(&iscsi_session_nr, 0);
38130
38131 err = class_register(&iscsi_transport_class);
38132 if (err)
fe2de317
MT
38133diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38134index 21a045e..ec89e03 100644
38135--- a/drivers/scsi/scsi_transport_srp.c
38136+++ b/drivers/scsi/scsi_transport_srp.c
8308f9c9
MT
38137@@ -33,7 +33,7 @@
38138 #include "scsi_transport_srp_internal.h"
38139
38140 struct srp_host_attrs {
38141- atomic_t next_port_id;
38142+ atomic_unchecked_t next_port_id;
38143 };
38144 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38145
fe2de317 38146@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
8308f9c9
MT
38147 struct Scsi_Host *shost = dev_to_shost(dev);
38148 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38149
38150- atomic_set(&srp_host->next_port_id, 0);
38151+ atomic_set_unchecked(&srp_host->next_port_id, 0);
38152 return 0;
38153 }
38154
fe2de317 38155@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
8308f9c9
MT
38156 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38157 rport->roles = ids->roles;
38158
38159- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38160+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38161 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38162
38163 transport_setup_device(&rport->dev);
fe2de317 38164diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
5e856224 38165index eacd46b..e3f4d62 100644
fe2de317
MT
38166--- a/drivers/scsi/sg.c
38167+++ b/drivers/scsi/sg.c
4c928ab7 38168@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
6e9df6a3
MT
38169 sdp->disk->disk_name,
38170 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38171 NULL,
38172- (char *)arg);
38173+ (char __user *)arg);
38174 case BLKTRACESTART:
38175 return blk_trace_startstop(sdp->device->request_queue, 1);
38176 case BLKTRACESTOP:
4c928ab7 38177@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
ae4e228f 38178 const struct file_operations * fops;
58c5fc13
MT
38179 };
38180
ae4e228f
MT
38181-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
38182+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
38183 {"allow_dio", &adio_fops},
38184 {"debug", &debug_fops},
38185 {"def_reserved_size", &dressz_fops},
5e856224 38186@@ -2332,7 +2332,7 @@ sg_proc_init(void)
ae4e228f 38187 if (!sg_proc_sgp)
5e856224
MT
38188 return 1;
38189 for (k = 0; k < num_leaves; ++k) {
38190- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
38191+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
38192 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
38193 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
38194 }
fe2de317 38195diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
4c928ab7 38196index f64250e..1ee3049 100644
fe2de317
MT
38197--- a/drivers/spi/spi-dw-pci.c
38198+++ b/drivers/spi/spi-dw-pci.c
4c928ab7 38199@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
6e9df6a3
MT
38200 #define spi_resume NULL
38201 #endif
38202
38203-static const struct pci_device_id pci_ids[] __devinitdata = {
38204+static const struct pci_device_id pci_ids[] __devinitconst = {
38205 /* Intel MID platform SPI controller 0 */
38206 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
38207 {},
fe2de317 38208diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
5e856224 38209index b2ccdea..84cde75 100644
fe2de317
MT
38210--- a/drivers/spi/spi.c
38211+++ b/drivers/spi/spi.c
4c928ab7 38212@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
fe2de317
MT
38213 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38214
38215 /* portable code must never pass more than 32 bytes */
38216-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38217+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38218
38219 static u8 *buf;
38220
fe2de317 38221diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
5e856224 38222index 400df8c..065d4f4 100644
fe2de317
MT
38223--- a/drivers/staging/octeon/ethernet-rx.c
38224+++ b/drivers/staging/octeon/ethernet-rx.c
38225@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
8308f9c9
MT
38226 /* Increment RX stats for virtual ports */
38227 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38228 #ifdef CONFIG_64BIT
38229- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38230- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38231+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38232+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38233 #else
38234- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38235- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38236+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38237+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38238 #endif
38239 }
38240 netif_receive_skb(skb);
fe2de317 38241@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
8308f9c9
MT
38242 dev->name);
38243 */
38244 #ifdef CONFIG_64BIT
38245- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38246+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38247 #else
38248- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38249+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38250 #endif
38251 dev_kfree_skb_irq(skb);
38252 }
fe2de317 38253diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
5e856224 38254index 9112cd8..92f8d51 100644
fe2de317
MT
38255--- a/drivers/staging/octeon/ethernet.c
38256+++ b/drivers/staging/octeon/ethernet.c
38257@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
38258 * since the RX tasklet also increments it.
38259 */
38260 #ifdef CONFIG_64BIT
38261- atomic64_add(rx_status.dropped_packets,
38262- (atomic64_t *)&priv->stats.rx_dropped);
38263+ atomic64_add_unchecked(rx_status.dropped_packets,
38264+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38265 #else
38266- atomic_add(rx_status.dropped_packets,
38267- (atomic_t *)&priv->stats.rx_dropped);
38268+ atomic_add_unchecked(rx_status.dropped_packets,
38269+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38270 #endif
38271 }
38272
4c928ab7 38273diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
5e856224 38274index f9dae95..ff48901 100644
4c928ab7
MT
38275--- a/drivers/staging/rtl8192e/rtllib_module.c
38276+++ b/drivers/staging/rtl8192e/rtllib_module.c
5e856224 38277@@ -215,6 +215,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
4c928ab7
MT
38278 }
38279
38280 static int store_debug_level(struct file *file, const char __user *buffer,
38281+ unsigned long count, void *data) __size_overflow(3);
38282+static int store_debug_level(struct file *file, const char __user *buffer,
38283 unsigned long count, void *data)
38284 {
38285 char buf[] = "0x00000000";
38286diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38287index e3d47bc..85f4d0d 100644
38288--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38289+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38290@@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
38291 }
38292
38293 static int store_debug_level(struct file *file, const char *buffer,
38294+ unsigned long count, void *data) __size_overflow(3);
38295+static int store_debug_level(struct file *file, const char *buffer,
38296 unsigned long count, void *data)
38297 {
38298 char buf[] = "0x00000000";
fe2de317 38299diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
4c928ab7 38300index 86308a0..feaa925 100644
fe2de317
MT
38301--- a/drivers/staging/rtl8712/rtl871x_io.h
38302+++ b/drivers/staging/rtl8712/rtl871x_io.h
4c928ab7 38303@@ -108,7 +108,7 @@ struct _io_ops {
15a11c5b
MT
38304 u8 *pmem);
38305 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
38306 u8 *pmem);
38307-};
38308+} __no_const;
38309
38310 struct io_req {
38311 struct list_head list;
fe2de317
MT
38312diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
38313index c7b5e8b..783d6cb 100644
38314--- a/drivers/staging/sbe-2t3e3/netdev.c
38315+++ b/drivers/staging/sbe-2t3e3/netdev.c
38316@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b
MT
38317 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
38318
38319 if (rlen)
38320- if (copy_to_user(data, &resp, rlen))
38321+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
38322 return -EFAULT;
66a7e928 38323
66a7e928 38324 return 0;
5e856224
MT
38325diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
38326index 42cdafe..2769103 100644
38327--- a/drivers/staging/speakup/speakup_soft.c
38328+++ b/drivers/staging/speakup/speakup_soft.c
38329@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
38330 break;
38331 } else if (!initialized) {
38332 if (*init) {
38333- ch = *init;
38334 init++;
38335 } else {
38336 initialized = 1;
38337 }
38338+ ch = *init;
38339 } else {
38340 ch = synth_buffer_getc();
38341 }
fe2de317 38342diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
5e856224 38343index b8f8c48..1fc5025 100644
fe2de317
MT
38344--- a/drivers/staging/usbip/usbip_common.h
38345+++ b/drivers/staging/usbip/usbip_common.h
6e9df6a3 38346@@ -289,7 +289,7 @@ struct usbip_device {
15a11c5b
MT
38347 void (*shutdown)(struct usbip_device *);
38348 void (*reset)(struct usbip_device *);
38349 void (*unusable)(struct usbip_device *);
38350- } eh_ops;
38351+ } __no_const eh_ops;
38352 };
38353
5e856224 38354 /* usbip_common.c */
fe2de317 38355diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
4c928ab7 38356index 88b3298..3783eee 100644
fe2de317
MT
38357--- a/drivers/staging/usbip/vhci.h
38358+++ b/drivers/staging/usbip/vhci.h
4c928ab7 38359@@ -88,7 +88,7 @@ struct vhci_hcd {
15a11c5b
MT
38360 unsigned resuming:1;
38361 unsigned long re_timeout;
8308f9c9
MT
38362
38363- atomic_t seqnum;
38364+ atomic_unchecked_t seqnum;
38365
38366 /*
38367 * NOTE:
fe2de317
MT
38368diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
38369index 2ee97e2..0420b86 100644
38370--- a/drivers/staging/usbip/vhci_hcd.c
38371+++ b/drivers/staging/usbip/vhci_hcd.c
6e9df6a3 38372@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
8308f9c9
MT
38373 return;
38374 }
38375
38376- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38377+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38378 if (priv->seqnum == 0xffff)
15a11c5b 38379 dev_info(&urb->dev->dev, "seqnum max\n");
8308f9c9 38380
fe2de317 38381@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
8308f9c9
MT
38382 return -ENOMEM;
38383 }
38384
38385- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38386+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38387 if (unlink->seqnum == 0xffff)
15a11c5b 38388 pr_info("seqnum max\n");
8308f9c9 38389
fe2de317 38390@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
8308f9c9
MT
38391 vdev->rhport = rhport;
38392 }
38393
38394- atomic_set(&vhci->seqnum, 0);
38395+ atomic_set_unchecked(&vhci->seqnum, 0);
38396 spin_lock_init(&vhci->lock);
38397
15a11c5b 38398 hcd->power_budget = 0; /* no limit */
fe2de317 38399diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
5e856224 38400index 3f511b4..d3dbc1e 100644
fe2de317
MT
38401--- a/drivers/staging/usbip/vhci_rx.c
38402+++ b/drivers/staging/usbip/vhci_rx.c
38403@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
15a11c5b
MT
38404 if (!urb) {
38405 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
38406 pr_info("max seqnum %d\n",
38407- atomic_read(&the_controller->seqnum));
38408+ atomic_read_unchecked(&the_controller->seqnum));
8308f9c9
MT
38409 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38410 return;
38411 }
fe2de317
MT
38412diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
38413index 7735027..30eed13 100644
38414--- a/drivers/staging/vt6655/hostap.c
38415+++ b/drivers/staging/vt6655/hostap.c
38416@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
15a11c5b
MT
38417 *
38418 */
66a7e928 38419
15a11c5b
MT
38420+static net_device_ops_no_const apdev_netdev_ops;
38421+
38422 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
66a7e928 38423 {
15a11c5b
MT
38424 PSDevice apdev_priv;
38425 struct net_device *dev = pDevice->dev;
38426 int ret;
38427- const struct net_device_ops apdev_netdev_ops = {
38428- .ndo_start_xmit = pDevice->tx_80211,
38429- };
66a7e928 38430
15a11c5b 38431 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
66a7e928 38432
fe2de317 38433@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
15a11c5b
MT
38434 *apdev_priv = *pDevice;
38435 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38436
38437+ /* only half broken now */
38438+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38439 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38440
38441 pDevice->apdev->type = ARPHRD_IEEE80211;
fe2de317
MT
38442diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
38443index 51b5adf..098e320 100644
38444--- a/drivers/staging/vt6656/hostap.c
38445+++ b/drivers/staging/vt6656/hostap.c
38446@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
15a11c5b
MT
38447 *
38448 */
66a7e928 38449
15a11c5b
MT
38450+static net_device_ops_no_const apdev_netdev_ops;
38451+
38452 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
66a7e928 38453 {
15a11c5b
MT
38454 PSDevice apdev_priv;
38455 struct net_device *dev = pDevice->dev;
38456 int ret;
38457- const struct net_device_ops apdev_netdev_ops = {
38458- .ndo_start_xmit = pDevice->tx_80211,
38459- };
38460
38461 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
66a7e928 38462
fe2de317 38463@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
15a11c5b
MT
38464 *apdev_priv = *pDevice;
38465 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38466
38467+ /* only half broken now */
38468+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38469 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38470
38471 pDevice->apdev->type = ARPHRD_IEEE80211;
fe2de317
MT
38472diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
38473index 7843dfd..3db105f 100644
38474--- a/drivers/staging/wlan-ng/hfa384x_usb.c
38475+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
38476@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
15a11c5b
MT
38477
38478 struct usbctlx_completor {
38479 int (*complete) (struct usbctlx_completor *);
38480-};
38481+} __no_const;
38482
38483 static int
38484 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
fe2de317
MT
38485diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
38486index 1ca66ea..76f1343 100644
38487--- a/drivers/staging/zcache/tmem.c
38488+++ b/drivers/staging/zcache/tmem.c
66a7e928
MT
38489@@ -39,7 +39,7 @@
38490 * A tmem host implementation must use this function to register callbacks
38491 * for memory allocation.
38492 */
38493-static struct tmem_hostops tmem_hostops;
15a11c5b 38494+static tmem_hostops_no_const tmem_hostops;
66a7e928
MT
38495
38496 static void tmem_objnode_tree_init(void);
38497
fe2de317 38498@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
66a7e928
MT
38499 * A tmem host implementation must use this function to register
38500 * callbacks for a page-accessible memory (PAM) implementation
38501 */
38502-static struct tmem_pamops tmem_pamops;
15a11c5b 38503+static tmem_pamops_no_const tmem_pamops;
66a7e928
MT
38504
38505 void tmem_register_pamops(struct tmem_pamops *m)
38506 {
fe2de317
MT
38507diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
38508index ed147c4..94fc3c6 100644
38509--- a/drivers/staging/zcache/tmem.h
38510+++ b/drivers/staging/zcache/tmem.h
6e9df6a3
MT
38511@@ -180,6 +180,7 @@ struct tmem_pamops {
38512 void (*new_obj)(struct tmem_obj *);
38513 int (*replace_in_obj)(void *, struct tmem_obj *);
15a11c5b
MT
38514 };
38515+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
38516 extern void tmem_register_pamops(struct tmem_pamops *m);
66a7e928 38517
15a11c5b 38518 /* memory allocation methods provided by the host implementation */
6e9df6a3 38519@@ -189,6 +190,7 @@ struct tmem_hostops {
15a11c5b
MT
38520 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
38521 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
38522 };
38523+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
38524 extern void tmem_register_hostops(struct tmem_hostops *m);
66a7e928 38525
15a11c5b 38526 /* core tmem accessor functions */
fe2de317 38527diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5e856224 38528index 97c74ee..7f6d77d 100644
fe2de317
MT
38529--- a/drivers/target/iscsi/iscsi_target.c
38530+++ b/drivers/target/iscsi/iscsi_target.c
5e856224 38531@@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
6e9df6a3
MT
38532 * outstanding_r2ts reaches zero, go ahead and send the delayed
38533 * TASK_ABORTED status.
38534 */
38535- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
38536+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
38537 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
38538 if (--cmd->outstanding_r2ts < 1) {
38539 iscsit_stop_dataout_timer(cmd);
fe2de317 38540diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
5e856224 38541index dcb0618..97e3d85 100644
fe2de317
MT
38542--- a/drivers/target/target_core_tmr.c
38543+++ b/drivers/target/target_core_tmr.c
5e856224 38544@@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
6e9df6a3
MT
38545 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
38546 cmd->t_task_list_num,
38547 atomic_read(&cmd->t_task_cdbs_left),
38548- atomic_read(&cmd->t_task_cdbs_sent),
38549+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38550 atomic_read(&cmd->t_transport_active),
38551 atomic_read(&cmd->t_transport_stop),
38552 atomic_read(&cmd->t_transport_sent));
5e856224 38553@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
6e9df6a3 38554 pr_debug("LUN_RESET: got t_transport_active = 1 for"
8308f9c9
MT
38555 " task: %p, t_fe_count: %d dev: %p\n", task,
38556 fe_count, dev);
6e9df6a3
MT
38557- atomic_set(&cmd->t_transport_aborted, 1);
38558+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38559 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38560
8308f9c9 38561 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
5e856224 38562@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
8308f9c9 38563 }
6e9df6a3 38564 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
8308f9c9 38565 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
6e9df6a3
MT
38566- atomic_set(&cmd->t_transport_aborted, 1);
38567+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38568 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8308f9c9 38569
6e9df6a3 38570 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
fe2de317 38571diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5e856224 38572index cd5cd95..5249d30 100644
fe2de317
MT
38573--- a/drivers/target/target_core_transport.c
38574+++ b/drivers/target/target_core_transport.c
5e856224
MT
38575@@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
38576 spin_lock_init(&dev->se_port_lock);
38577 spin_lock_init(&dev->se_tmr_lock);
38578 spin_lock_init(&dev->qf_cmd_lock);
8308f9c9
MT
38579- atomic_set(&dev->dev_ordered_id, 0);
38580+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
38581
38582 se_dev_set_default_attribs(dev, dev_limits);
38583
5e856224 38584@@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
8308f9c9
MT
38585 * Used to determine when ORDERED commands should go from
38586 * Dormant to Active status.
38587 */
6e9df6a3
MT
38588- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
38589+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
8308f9c9 38590 smp_mb__after_atomic_inc();
6e9df6a3 38591 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
8308f9c9 38592 cmd->se_ordered_id, cmd->sam_task_attr,
5e856224 38593@@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
8308f9c9 38594 " t_transport_active: %d t_transport_stop: %d"
6e9df6a3
MT
38595 " t_transport_sent: %d\n", cmd->t_task_list_num,
38596 atomic_read(&cmd->t_task_cdbs_left),
38597- atomic_read(&cmd->t_task_cdbs_sent),
38598+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38599 atomic_read(&cmd->t_task_cdbs_ex_left),
38600 atomic_read(&cmd->t_transport_active),
38601 atomic_read(&cmd->t_transport_stop),
5e856224
MT
38602@@ -2121,9 +2121,9 @@ check_depth:
38603 cmd = task->task_se_cmd;
6e9df6a3 38604 spin_lock_irqsave(&cmd->t_state_lock, flags);
4c928ab7 38605 task->task_flags |= (TF_ACTIVE | TF_SENT);
6e9df6a3
MT
38606- atomic_inc(&cmd->t_task_cdbs_sent);
38607+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
8308f9c9 38608
6e9df6a3
MT
38609- if (atomic_read(&cmd->t_task_cdbs_sent) ==
38610+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
38611 cmd->t_task_list_num)
4c928ab7 38612 atomic_set(&cmd->t_transport_sent, 1);
8308f9c9 38613
5e856224 38614@@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
6e9df6a3 38615 atomic_set(&cmd->transport_lun_stop, 0);
8308f9c9 38616 }
6e9df6a3 38617 if (!atomic_read(&cmd->t_transport_active) ||
4c928ab7
MT
38618- atomic_read(&cmd->t_transport_aborted)) {
38619+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
38620 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38621 return false;
38622 }
5e856224 38623@@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8308f9c9
MT
38624 {
38625 int ret = 0;
38626
6e9df6a3
MT
38627- if (atomic_read(&cmd->t_transport_aborted) != 0) {
38628+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
38629 if (!send_status ||
8308f9c9
MT
38630 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
38631 return 1;
5e856224 38632@@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
8308f9c9
MT
38633 */
38634 if (cmd->data_direction == DMA_TO_DEVICE) {
6e9df6a3
MT
38635 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
38636- atomic_inc(&cmd->t_transport_aborted);
38637+ atomic_inc_unchecked(&cmd->t_transport_aborted);
8308f9c9 38638 smp_mb__after_atomic_inc();
4c928ab7
MT
38639 }
38640 }
fe2de317 38641diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
4c928ab7 38642index b9040be..e3f5aab 100644
fe2de317
MT
38643--- a/drivers/tty/hvc/hvcs.c
38644+++ b/drivers/tty/hvc/hvcs.c
16454cff
MT
38645@@ -83,6 +83,7 @@
38646 #include <asm/hvcserver.h>
38647 #include <asm/uaccess.h>
38648 #include <asm/vio.h>
38649+#include <asm/local.h>
38650
38651 /*
38652 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
38653@@ -270,7 +271,7 @@ struct hvcs_struct {
38654 unsigned int index;
38655
38656 struct tty_struct *tty;
38657- int open_count;
38658+ local_t open_count;
38659
38660 /*
38661 * Used to tell the driver kernel_thread what operations need to take
fe2de317 38662@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
16454cff
MT
38663
38664 spin_lock_irqsave(&hvcsd->lock, flags);
38665
38666- if (hvcsd->open_count > 0) {
38667+ if (local_read(&hvcsd->open_count) > 0) {
38668 spin_unlock_irqrestore(&hvcsd->lock, flags);
38669 printk(KERN_INFO "HVCS: vterm state unchanged. "
38670 "The hvcs device node is still in use.\n");
fe2de317 38671@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
16454cff
MT
38672 if ((retval = hvcs_partner_connect(hvcsd)))
38673 goto error_release;
38674
38675- hvcsd->open_count = 1;
38676+ local_set(&hvcsd->open_count, 1);
38677 hvcsd->tty = tty;
38678 tty->driver_data = hvcsd;
38679
66a7e928 38680@@ -1179,7 +1180,7 @@ fast_open:
16454cff
MT
38681
38682 spin_lock_irqsave(&hvcsd->lock, flags);
38683 kref_get(&hvcsd->kref);
38684- hvcsd->open_count++;
38685+ local_inc(&hvcsd->open_count);
38686 hvcsd->todo_mask |= HVCS_SCHED_READ;
38687 spin_unlock_irqrestore(&hvcsd->lock, flags);
38688
fe2de317 38689@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
16454cff
MT
38690 hvcsd = tty->driver_data;
38691
38692 spin_lock_irqsave(&hvcsd->lock, flags);
38693- if (--hvcsd->open_count == 0) {
38694+ if (local_dec_and_test(&hvcsd->open_count)) {
38695
38696 vio_disable_interrupts(hvcsd->vdev);
38697
fe2de317 38698@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
16454cff
MT
38699 free_irq(irq, hvcsd);
38700 kref_put(&hvcsd->kref, destroy_hvcs_struct);
38701 return;
38702- } else if (hvcsd->open_count < 0) {
38703+ } else if (local_read(&hvcsd->open_count) < 0) {
38704 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
38705 " is missmanaged.\n",
38706- hvcsd->vdev->unit_address, hvcsd->open_count);
38707+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
38708 }
38709
38710 spin_unlock_irqrestore(&hvcsd->lock, flags);
fe2de317 38711@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
16454cff
MT
38712
38713 spin_lock_irqsave(&hvcsd->lock, flags);
38714 /* Preserve this so that we know how many kref refs to put */
38715- temp_open_count = hvcsd->open_count;
38716+ temp_open_count = local_read(&hvcsd->open_count);
38717
38718 /*
38719 * Don't kref put inside the spinlock because the destruction
fe2de317 38720@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
16454cff
MT
38721 hvcsd->tty->driver_data = NULL;
38722 hvcsd->tty = NULL;
38723
38724- hvcsd->open_count = 0;
38725+ local_set(&hvcsd->open_count, 0);
38726
38727 /* This will drop any buffered data on the floor which is OK in a hangup
38728 * scenario. */
fe2de317 38729@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
16454cff
MT
38730 * the middle of a write operation? This is a crummy place to do this
38731 * but we want to keep it all in the spinlock.
38732 */
38733- if (hvcsd->open_count <= 0) {
38734+ if (local_read(&hvcsd->open_count) <= 0) {
38735 spin_unlock_irqrestore(&hvcsd->lock, flags);
38736 return -ENODEV;
38737 }
fe2de317 38738@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
16454cff
MT
38739 {
38740 struct hvcs_struct *hvcsd = tty->driver_data;
38741
38742- if (!hvcsd || hvcsd->open_count <= 0)
38743+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
38744 return 0;
38745
38746 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
fe2de317
MT
38747diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
38748index ef92869..f4ebd88 100644
38749--- a/drivers/tty/ipwireless/tty.c
38750+++ b/drivers/tty/ipwireless/tty.c
66a7e928
MT
38751@@ -29,6 +29,7 @@
38752 #include <linux/tty_driver.h>
38753 #include <linux/tty_flip.h>
38754 #include <linux/uaccess.h>
38755+#include <asm/local.h>
38756
38757 #include "tty.h"
38758 #include "network.h"
38759@@ -51,7 +52,7 @@ struct ipw_tty {
38760 int tty_type;
38761 struct ipw_network *network;
38762 struct tty_struct *linux_tty;
38763- int open_count;
38764+ local_t open_count;
38765 unsigned int control_lines;
38766 struct mutex ipw_tty_mutex;
38767 int tx_bytes_queued;
fe2de317 38768@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
66a7e928
MT
38769 mutex_unlock(&tty->ipw_tty_mutex);
38770 return -ENODEV;
38771 }
38772- if (tty->open_count == 0)
38773+ if (local_read(&tty->open_count) == 0)
38774 tty->tx_bytes_queued = 0;
38775
38776- tty->open_count++;
38777+ local_inc(&tty->open_count);
38778
38779 tty->linux_tty = linux_tty;
38780 linux_tty->driver_data = tty;
fe2de317 38781@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
66a7e928
MT
38782
38783 static void do_ipw_close(struct ipw_tty *tty)
38784 {
38785- tty->open_count--;
38786-
38787- if (tty->open_count == 0) {
38788+ if (local_dec_return(&tty->open_count) == 0) {
38789 struct tty_struct *linux_tty = tty->linux_tty;
38790
38791 if (linux_tty != NULL) {
fe2de317 38792@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
66a7e928
MT
38793 return;
38794
38795 mutex_lock(&tty->ipw_tty_mutex);
38796- if (tty->open_count == 0) {
38797+ if (local_read(&tty->open_count) == 0) {
38798 mutex_unlock(&tty->ipw_tty_mutex);
38799 return;
38800 }
fe2de317 38801@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
66a7e928
MT
38802 return;
38803 }
38804
38805- if (!tty->open_count) {
38806+ if (!local_read(&tty->open_count)) {
38807 mutex_unlock(&tty->ipw_tty_mutex);
38808 return;
38809 }
fe2de317 38810@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
66a7e928
MT
38811 return -ENODEV;
38812
38813 mutex_lock(&tty->ipw_tty_mutex);
38814- if (!tty->open_count) {
38815+ if (!local_read(&tty->open_count)) {
38816 mutex_unlock(&tty->ipw_tty_mutex);
38817 return -EINVAL;
38818 }
fe2de317 38819@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
66a7e928
MT
38820 if (!tty)
38821 return -ENODEV;
38822
38823- if (!tty->open_count)
38824+ if (!local_read(&tty->open_count))
38825 return -EINVAL;
38826
38827 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
fe2de317 38828@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
66a7e928
MT
38829 if (!tty)
38830 return 0;
38831
38832- if (!tty->open_count)
38833+ if (!local_read(&tty->open_count))
38834 return 0;
38835
38836 return tty->tx_bytes_queued;
fe2de317 38837@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
66a7e928
MT
38838 if (!tty)
38839 return -ENODEV;
38840
38841- if (!tty->open_count)
38842+ if (!local_read(&tty->open_count))
38843 return -EINVAL;
38844
38845 return get_control_lines(tty);
fe2de317 38846@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
66a7e928
MT
38847 if (!tty)
38848 return -ENODEV;
38849
38850- if (!tty->open_count)
38851+ if (!local_read(&tty->open_count))
38852 return -EINVAL;
38853
38854 return set_control_lines(tty, set, clear);
fe2de317 38855@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
66a7e928
MT
38856 if (!tty)
38857 return -ENODEV;
38858
38859- if (!tty->open_count)
38860+ if (!local_read(&tty->open_count))
38861 return -EINVAL;
38862
38863 /* FIXME: Exactly how is the tty object locked here .. */
fe2de317 38864@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
66a7e928
MT
38865 against a parallel ioctl etc */
38866 mutex_lock(&ttyj->ipw_tty_mutex);
38867 }
38868- while (ttyj->open_count)
38869+ while (local_read(&ttyj->open_count))
38870 do_ipw_close(ttyj);
38871 ipwireless_disassociate_network_ttys(network,
38872 ttyj->channel_idx);
fe2de317 38873diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
4c928ab7 38874index fc7bbba..9527e93 100644
fe2de317
MT
38875--- a/drivers/tty/n_gsm.c
38876+++ b/drivers/tty/n_gsm.c
4c928ab7 38877@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
6e9df6a3
MT
38878 kref_init(&dlci->ref);
38879 mutex_init(&dlci->mutex);
bc901d79
MT
38880 dlci->fifo = &dlci->_fifo;
38881- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38882+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38883 kfree(dlci);
38884 return NULL;
38885 }
fe2de317 38886diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
5e856224 38887index d2256d0..97476fa 100644
fe2de317
MT
38888--- a/drivers/tty/n_tty.c
38889+++ b/drivers/tty/n_tty.c
38890@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
bc901d79
MT
38891 {
38892 *ops = tty_ldisc_N_TTY;
38893 ops->owner = NULL;
38894- ops->refcount = ops->flags = 0;
38895+ atomic_set(&ops->refcount, 0);
38896+ ops->flags = 0;
38897 }
38898 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
fe2de317 38899diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
5e856224 38900index d8653ab..f8afd9d 100644
fe2de317
MT
38901--- a/drivers/tty/pty.c
38902+++ b/drivers/tty/pty.c
5e856224 38903@@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
bc901d79
MT
38904 register_sysctl_table(pty_root_table);
38905
15a11c5b
MT
38906 /* Now create the /dev/ptmx special device */
38907+ pax_open_kernel();
38908 tty_default_fops(&ptmx_fops);
bc901d79 38909- ptmx_fops.open = ptmx_open;
15a11c5b
MT
38910+ *(void **)&ptmx_fops.open = ptmx_open;
38911+ pax_close_kernel();
38912
bc901d79
MT
38913 cdev_init(&ptmx_cdev, &ptmx_fops);
38914 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
fe2de317 38915diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
4c928ab7 38916index 2b42a01..32a2ed3 100644
fe2de317
MT
38917--- a/drivers/tty/serial/kgdboc.c
38918+++ b/drivers/tty/serial/kgdboc.c
4c928ab7 38919@@ -24,8 +24,9 @@
15a11c5b 38920 #define MAX_CONFIG_LEN 40
66a7e928 38921
15a11c5b
MT
38922 static struct kgdb_io kgdboc_io_ops;
38923+static struct kgdb_io kgdboc_io_ops_console;
66a7e928 38924
15a11c5b
MT
38925-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38926+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38927 static int configured = -1;
66a7e928 38928
15a11c5b 38929 static char config[MAX_CONFIG_LEN];
4c928ab7 38930@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
15a11c5b
MT
38931 kgdboc_unregister_kbd();
38932 if (configured == 1)
38933 kgdb_unregister_io_module(&kgdboc_io_ops);
38934+ else if (configured == 2)
38935+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
66a7e928
MT
38936 }
38937
15a11c5b 38938 static int configure_kgdboc(void)
4c928ab7 38939@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
15a11c5b
MT
38940 int err;
38941 char *cptr = config;
38942 struct console *cons;
38943+ int is_console = 0;
38944
38945 err = kgdboc_option_setup(config);
38946 if (err || !strlen(config) || isspace(config[0]))
38947 goto noconfig;
38948
38949 err = -ENODEV;
38950- kgdboc_io_ops.is_console = 0;
38951 kgdb_tty_driver = NULL;
38952
38953 kgdboc_use_kms = 0;
4c928ab7 38954@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
15a11c5b
MT
38955 int idx;
38956 if (cons->device && cons->device(cons, &idx) == p &&
38957 idx == tty_line) {
38958- kgdboc_io_ops.is_console = 1;
38959+ is_console = 1;
38960 break;
38961 }
38962 cons = cons->next;
4c928ab7 38963@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
15a11c5b
MT
38964 kgdb_tty_line = tty_line;
38965
38966 do_register:
38967- err = kgdb_register_io_module(&kgdboc_io_ops);
38968+ if (is_console) {
38969+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38970+ configured = 2;
38971+ } else {
38972+ err = kgdb_register_io_module(&kgdboc_io_ops);
38973+ configured = 1;
38974+ }
38975 if (err)
38976 goto noconfig;
66a7e928 38977
15a11c5b
MT
38978- configured = 1;
38979-
66a7e928 38980 return 0;
66a7e928 38981
15a11c5b 38982 noconfig:
4c928ab7 38983@@ -213,7 +220,7 @@ noconfig:
15a11c5b
MT
38984 static int __init init_kgdboc(void)
38985 {
38986 /* Already configured? */
38987- if (configured == 1)
38988+ if (configured >= 1)
38989 return 0;
16454cff 38990
15a11c5b 38991 return configure_kgdboc();
4c928ab7 38992@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
15a11c5b
MT
38993 if (config[len - 1] == '\n')
38994 config[len - 1] = '\0';
66a7e928 38995
15a11c5b
MT
38996- if (configured == 1)
38997+ if (configured >= 1)
38998 cleanup_kgdboc();
66a7e928 38999
15a11c5b 39000 /* Go and configure with the new params. */
4c928ab7 39001@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
15a11c5b 39002 .post_exception = kgdboc_post_exp_handler,
66a7e928 39003 };
66a7e928 39004
15a11c5b
MT
39005+static struct kgdb_io kgdboc_io_ops_console = {
39006+ .name = "kgdboc",
39007+ .read_char = kgdboc_get_char,
39008+ .write_char = kgdboc_put_char,
39009+ .pre_exception = kgdboc_pre_exp_handler,
39010+ .post_exception = kgdboc_post_exp_handler,
39011+ .is_console = 1
39012+};
39013+
39014 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
39015 /* This is only available if kgdboc is a built in for early debugging */
39016 static int __init kgdboc_early_init(char *opt)
5e856224
MT
39017diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
39018index 7867b7c..b3c119d 100644
39019--- a/drivers/tty/sysrq.c
39020+++ b/drivers/tty/sysrq.c
39021@@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
39022 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
39023 size_t count, loff_t *ppos)
39024 {
39025- if (count) {
39026+ if (count && capable(CAP_SYS_ADMIN)) {
39027 char c;
39028
39029 if (get_user(c, buf))
fe2de317 39030diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
5e856224 39031index e41b9bb..84002fb 100644
fe2de317
MT
39032--- a/drivers/tty/tty_io.c
39033+++ b/drivers/tty/tty_io.c
5e856224 39034@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
66a7e928 39035
15a11c5b 39036 void tty_default_fops(struct file_operations *fops)
bc901d79 39037 {
15a11c5b
MT
39038- *fops = tty_fops;
39039+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
bc901d79 39040 }
bc901d79 39041
bc901d79 39042 /*
fe2de317 39043diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
5e856224 39044index 24b95db..9c078d0 100644
fe2de317
MT
39045--- a/drivers/tty/tty_ldisc.c
39046+++ b/drivers/tty/tty_ldisc.c
5e856224 39047@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
bc901d79
MT
39048 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
39049 struct tty_ldisc_ops *ldo = ld->ops;
39050
39051- ldo->refcount--;
39052+ atomic_dec(&ldo->refcount);
39053 module_put(ldo->owner);
39054 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39055
5e856224 39056@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
bc901d79
MT
39057 spin_lock_irqsave(&tty_ldisc_lock, flags);
39058 tty_ldiscs[disc] = new_ldisc;
39059 new_ldisc->num = disc;
39060- new_ldisc->refcount = 0;
39061+ atomic_set(&new_ldisc->refcount, 0);
39062 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39063
39064 return ret;
5e856224 39065@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
bc901d79
MT
39066 return -EINVAL;
39067
39068 spin_lock_irqsave(&tty_ldisc_lock, flags);
39069- if (tty_ldiscs[disc]->refcount)
39070+ if (atomic_read(&tty_ldiscs[disc]->refcount))
39071 ret = -EBUSY;
39072 else
39073 tty_ldiscs[disc] = NULL;
5e856224 39074@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
bc901d79
MT
39075 if (ldops) {
39076 ret = ERR_PTR(-EAGAIN);
39077 if (try_module_get(ldops->owner)) {
39078- ldops->refcount++;
39079+ atomic_inc(&ldops->refcount);
39080 ret = ldops;
39081 }
39082 }
5e856224 39083@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
bc901d79
MT
39084 unsigned long flags;
39085
39086 spin_lock_irqsave(&tty_ldisc_lock, flags);
39087- ldops->refcount--;
39088+ atomic_dec(&ldops->refcount);
39089 module_put(ldops->owner);
39090 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39091 }
fe2de317 39092diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
4c928ab7 39093index a605549..6bd3c96 100644
fe2de317
MT
39094--- a/drivers/tty/vt/keyboard.c
39095+++ b/drivers/tty/vt/keyboard.c
4c928ab7 39096@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
66a7e928 39097 kbd->kbdmode == VC_OFF) &&
bc901d79
MT
39098 value != KVAL(K_SAK))
39099 return; /* SAK is allowed even in raw mode */
39100+
39101+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39102+ {
39103+ void *func = fn_handler[value];
39104+ if (func == fn_show_state || func == fn_show_ptregs ||
39105+ func == fn_show_mem)
39106+ return;
39107+ }
39108+#endif
39109+
39110 fn_handler[value](vc);
39111 }
39112
fe2de317 39113diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
4c928ab7 39114index 65447c5..0526f0a 100644
fe2de317
MT
39115--- a/drivers/tty/vt/vt_ioctl.c
39116+++ b/drivers/tty/vt/vt_ioctl.c
39117@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
bc901d79
MT
39118 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
39119 return -EFAULT;
39120
39121- if (!capable(CAP_SYS_TTY_CONFIG))
39122- perm = 0;
39123-
39124 switch (cmd) {
39125 case KDGKBENT:
39126 key_map = key_maps[s];
fe2de317 39127@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
bc901d79
MT
39128 val = (i ? K_HOLE : K_NOSUCHMAP);
39129 return put_user(val, &user_kbe->kb_value);
39130 case KDSKBENT:
39131+ if (!capable(CAP_SYS_TTY_CONFIG))
39132+ perm = 0;
39133+
39134 if (!perm)
39135 return -EPERM;
39136 if (!i && v == K_NOSUCHMAP) {
fe2de317 39137@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
39138 int i, j, k;
39139 int ret;
39140
39141- if (!capable(CAP_SYS_TTY_CONFIG))
39142- perm = 0;
39143-
39144 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
39145 if (!kbs) {
39146 ret = -ENOMEM;
fe2de317 39147@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
39148 kfree(kbs);
39149 return ((p && *p) ? -EOVERFLOW : 0);
39150 case KDSKBSENT:
39151+ if (!capable(CAP_SYS_TTY_CONFIG))
39152+ perm = 0;
39153+
39154 if (!perm) {
39155 ret = -EPERM;
39156 goto reterr;
fe2de317 39157diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
4c928ab7 39158index a783d53..cb30d94 100644
fe2de317
MT
39159--- a/drivers/uio/uio.c
39160+++ b/drivers/uio/uio.c
c52201e0
MT
39161@@ -25,6 +25,7 @@
39162 #include <linux/kobject.h>
39163 #include <linux/cdev.h>
39164 #include <linux/uio_driver.h>
39165+#include <asm/local.h>
39166
39167 #define UIO_MAX_DEVICES (1U << MINORBITS)
39168
8308f9c9
MT
39169@@ -32,10 +33,10 @@ struct uio_device {
39170 struct module *owner;
39171 struct device *dev;
39172 int minor;
39173- atomic_t event;
39174+ atomic_unchecked_t event;
c52201e0
MT
39175 struct fasync_struct *async_queue;
39176 wait_queue_head_t wait;
39177- int vma_count;
39178+ local_t vma_count;
39179 struct uio_info *info;
39180 struct kobject *map_dir;
39181 struct kobject *portio_dir;
fe2de317 39182@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
8308f9c9
MT
39183 struct device_attribute *attr, char *buf)
39184 {
39185 struct uio_device *idev = dev_get_drvdata(dev);
39186- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
39187+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
39188 }
39189
39190 static struct device_attribute uio_class_attributes[] = {
fe2de317 39191@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
8308f9c9
MT
39192 {
39193 struct uio_device *idev = info->uio_dev;
39194
39195- atomic_inc(&idev->event);
39196+ atomic_inc_unchecked(&idev->event);
39197 wake_up_interruptible(&idev->wait);
39198 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
39199 }
fe2de317 39200@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
8308f9c9
MT
39201 }
39202
39203 listener->dev = idev;
39204- listener->event_count = atomic_read(&idev->event);
39205+ listener->event_count = atomic_read_unchecked(&idev->event);
39206 filep->private_data = listener;
39207
39208 if (idev->info->open) {
fe2de317 39209@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
8308f9c9
MT
39210 return -EIO;
39211
39212 poll_wait(filep, &idev->wait, wait);
39213- if (listener->event_count != atomic_read(&idev->event))
39214+ if (listener->event_count != atomic_read_unchecked(&idev->event))
39215 return POLLIN | POLLRDNORM;
39216 return 0;
39217 }
fe2de317 39218@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
8308f9c9
MT
39219 do {
39220 set_current_state(TASK_INTERRUPTIBLE);
39221
39222- event_count = atomic_read(&idev->event);
39223+ event_count = atomic_read_unchecked(&idev->event);
39224 if (event_count != listener->event_count) {
39225 if (copy_to_user(buf, &event_count, count))
39226 retval = -EFAULT;
fe2de317 39227@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
c52201e0
MT
39228 static void uio_vma_open(struct vm_area_struct *vma)
39229 {
39230 struct uio_device *idev = vma->vm_private_data;
39231- idev->vma_count++;
39232+ local_inc(&idev->vma_count);
39233 }
39234
39235 static void uio_vma_close(struct vm_area_struct *vma)
39236 {
39237 struct uio_device *idev = vma->vm_private_data;
39238- idev->vma_count--;
39239+ local_dec(&idev->vma_count);
39240 }
39241
39242 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4c928ab7 39243@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
8308f9c9
MT
39244 idev->owner = owner;
39245 idev->info = info;
39246 init_waitqueue_head(&idev->wait);
39247- atomic_set(&idev->event, 0);
39248+ atomic_set_unchecked(&idev->event, 0);
39249
39250 ret = uio_get_minor(idev);
39251 if (ret)
fe2de317 39252diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
5e856224 39253index 98b89fe..aff824e 100644
fe2de317
MT
39254--- a/drivers/usb/atm/cxacru.c
39255+++ b/drivers/usb/atm/cxacru.c
39256@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
6892158b
MT
39257 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
39258 if (ret < 2)
39259 return -EINVAL;
39260- if (index < 0 || index > 0x7f)
39261+ if (index > 0x7f)
39262 return -EINVAL;
39263 pos += tmp;
39264
fe2de317
MT
39265diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
39266index d3448ca..d2864ca 100644
39267--- a/drivers/usb/atm/usbatm.c
39268+++ b/drivers/usb/atm/usbatm.c
39269@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
39270 if (printk_ratelimit())
39271 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
39272 __func__, vpi, vci);
39273- atomic_inc(&vcc->stats->rx_err);
39274+ atomic_inc_unchecked(&vcc->stats->rx_err);
39275 return;
39276 }
39277
fe2de317 39278@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
39279 if (length > ATM_MAX_AAL5_PDU) {
39280 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
39281 __func__, length, vcc);
39282- atomic_inc(&vcc->stats->rx_err);
39283+ atomic_inc_unchecked(&vcc->stats->rx_err);
39284 goto out;
39285 }
39286
fe2de317 39287@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
39288 if (sarb->len < pdu_length) {
39289 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
39290 __func__, pdu_length, sarb->len, vcc);
39291- atomic_inc(&vcc->stats->rx_err);
39292+ atomic_inc_unchecked(&vcc->stats->rx_err);
39293 goto out;
39294 }
39295
39296 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
39297 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
39298 __func__, vcc);
39299- atomic_inc(&vcc->stats->rx_err);
39300+ atomic_inc_unchecked(&vcc->stats->rx_err);
39301 goto out;
39302 }
39303
fe2de317 39304@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
39305 if (printk_ratelimit())
39306 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
39307 __func__, length);
39308- atomic_inc(&vcc->stats->rx_drop);
39309+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39310 goto out;
39311 }
39312
fe2de317 39313@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
39314
39315 vcc->push(vcc, skb);
39316
39317- atomic_inc(&vcc->stats->rx);
39318+ atomic_inc_unchecked(&vcc->stats->rx);
39319 out:
39320 skb_trim(sarb, 0);
39321 }
fe2de317 39322@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
58c5fc13
MT
39323 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
39324
39325 usbatm_pop(vcc, skb);
39326- atomic_inc(&vcc->stats->tx);
39327+ atomic_inc_unchecked(&vcc->stats->tx);
39328
39329 skb = skb_dequeue(&instance->sndqueue);
39330 }
fe2de317 39331@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
58c5fc13
MT
39332 if (!left--)
39333 return sprintf(page,
39334 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
39335- atomic_read(&atm_dev->stats.aal5.tx),
39336- atomic_read(&atm_dev->stats.aal5.tx_err),
39337- atomic_read(&atm_dev->stats.aal5.rx),
39338- atomic_read(&atm_dev->stats.aal5.rx_err),
39339- atomic_read(&atm_dev->stats.aal5.rx_drop));
39340+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
39341+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
39342+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
39343+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
39344+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
39345
39346 if (!left--) {
39347 if (instance->disconnected)
fe2de317 39348diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
4c928ab7 39349index d956965..4179a77 100644
fe2de317
MT
39350--- a/drivers/usb/core/devices.c
39351+++ b/drivers/usb/core/devices.c
15a11c5b 39352@@ -126,7 +126,7 @@ static const char format_endpt[] =
8308f9c9
MT
39353 * time it gets called.
39354 */
39355 static struct device_connect_event {
39356- atomic_t count;
39357+ atomic_unchecked_t count;
39358 wait_queue_head_t wait;
39359 } device_event = {
39360 .count = ATOMIC_INIT(1),
fe2de317 39361@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
8308f9c9
MT
39362
39363 void usbfs_conn_disc_event(void)
39364 {
39365- atomic_add(2, &device_event.count);
39366+ atomic_add_unchecked(2, &device_event.count);
39367 wake_up(&device_event.wait);
39368 }
39369
fe2de317 39370@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
8308f9c9
MT
39371
39372 poll_wait(file, &device_event.wait, wait);
39373
39374- event_count = atomic_read(&device_event.count);
39375+ event_count = atomic_read_unchecked(&device_event.count);
39376 if (file->f_version != event_count) {
39377 file->f_version = event_count;
39378 return POLLIN | POLLRDNORM;
fe2de317
MT
39379diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
39380index 1fc8f12..20647c1 100644
39381--- a/drivers/usb/early/ehci-dbgp.c
39382+++ b/drivers/usb/early/ehci-dbgp.c
39383@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
6892158b
MT
39384
39385 #ifdef CONFIG_KGDB
15a11c5b
MT
39386 static struct kgdb_io kgdbdbgp_io_ops;
39387-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
39388+static struct kgdb_io kgdbdbgp_io_ops_console;
39389+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
6892158b 39390 #else
16454cff 39391 #define dbgp_kgdb_mode (0)
15a11c5b 39392 #endif
fe2de317 39393@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
16454cff 39394 .write_char = kgdbdbgp_write_char,
66a7e928
MT
39395 };
39396
15a11c5b
MT
39397+static struct kgdb_io kgdbdbgp_io_ops_console = {
39398+ .name = "kgdbdbgp",
39399+ .read_char = kgdbdbgp_read_char,
39400+ .write_char = kgdbdbgp_write_char,
39401+ .is_console = 1
39402+};
39403+
39404 static int kgdbdbgp_wait_time;
66a7e928 39405
15a11c5b 39406 static int __init kgdbdbgp_parse_config(char *str)
fe2de317 39407@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
15a11c5b
MT
39408 ptr++;
39409 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
39410 }
39411- kgdb_register_io_module(&kgdbdbgp_io_ops);
39412- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
39413+ if (early_dbgp_console.index != -1)
39414+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
39415+ else
39416+ kgdb_register_io_module(&kgdbdbgp_io_ops);
66a7e928 39417
66a7e928
MT
39418 return 0;
39419 }
fe2de317
MT
39420diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39421index d6bea3e..60b250e 100644
39422--- a/drivers/usb/wusbcore/wa-hc.h
39423+++ b/drivers/usb/wusbcore/wa-hc.h
8308f9c9
MT
39424@@ -192,7 +192,7 @@ struct wahc {
39425 struct list_head xfer_delayed_list;
39426 spinlock_t xfer_list_lock;
39427 struct work_struct xfer_work;
39428- atomic_t xfer_id_count;
39429+ atomic_unchecked_t xfer_id_count;
39430 };
39431
39432
fe2de317 39433@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
8308f9c9
MT
39434 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39435 spin_lock_init(&wa->xfer_list_lock);
39436 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39437- atomic_set(&wa->xfer_id_count, 1);
39438+ atomic_set_unchecked(&wa->xfer_id_count, 1);
39439 }
39440
39441 /**
fe2de317 39442diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
4c928ab7 39443index 57c01ab..8a05959 100644
fe2de317
MT
39444--- a/drivers/usb/wusbcore/wa-xfer.c
39445+++ b/drivers/usb/wusbcore/wa-xfer.c
4c928ab7 39446@@ -296,7 +296,7 @@ out:
8308f9c9
MT
39447 */
39448 static void wa_xfer_id_init(struct wa_xfer *xfer)
39449 {
39450- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39451+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39452 }
39453
39454 /*
fe2de317
MT
39455diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39456index c14c42b..f955cc2 100644
39457--- a/drivers/vhost/vhost.c
39458+++ b/drivers/vhost/vhost.c
39459@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
6e9df6a3 39460 return 0;
57199397
MT
39461 }
39462
39463-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39464+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39465 {
39466 struct file *eventfp, *filep = NULL,
39467 *pollstart = NULL, *pollstop = NULL;
fe2de317
MT
39468diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39469index b0b2ac3..89a4399 100644
39470--- a/drivers/video/aty/aty128fb.c
39471+++ b/drivers/video/aty/aty128fb.c
6e9df6a3
MT
39472@@ -148,7 +148,7 @@ enum {
39473 };
39474
39475 /* Must match above enum */
39476-static const char *r128_family[] __devinitdata = {
39477+static const char *r128_family[] __devinitconst = {
39478 "AGP",
39479 "PCI",
39480 "PRO AGP",
fe2de317
MT
39481diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39482index 5c3960d..15cf8fc 100644
39483--- a/drivers/video/fbcmap.c
39484+++ b/drivers/video/fbcmap.c
39485@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
df50ba0c
MT
39486 rc = -ENODEV;
39487 goto out;
39488 }
39489- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39490- !info->fbops->fb_setcmap)) {
39491+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39492 rc = -EINVAL;
39493 goto out1;
39494 }
fe2de317 39495diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
5e856224 39496index c6ce416..3b9b642 100644
fe2de317
MT
39497--- a/drivers/video/fbmem.c
39498+++ b/drivers/video/fbmem.c
39499@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
39500 image->dx += image->width + 8;
39501 }
39502 } else if (rotate == FB_ROTATE_UD) {
39503- for (x = 0; x < num && image->dx >= 0; x++) {
39504+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39505 info->fbops->fb_imageblit(info, image);
39506 image->dx -= image->width + 8;
39507 }
fe2de317 39508@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
39509 image->dy += image->height + 8;
39510 }
39511 } else if (rotate == FB_ROTATE_CCW) {
39512- for (x = 0; x < num && image->dy >= 0; x++) {
39513+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39514 info->fbops->fb_imageblit(info, image);
39515 image->dy -= image->height + 8;
39516 }
5e856224 39517@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
58c5fc13
MT
39518 return -EFAULT;
39519 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39520 return -EINVAL;
39521- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39522+ if (con2fb.framebuffer >= FB_MAX)
39523 return -EINVAL;
39524 if (!registered_fb[con2fb.framebuffer])
39525 request_module("fb%d", con2fb.framebuffer);
fe2de317
MT
39526diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39527index 5a5d092..265c5ed 100644
39528--- a/drivers/video/geode/gx1fb_core.c
39529+++ b/drivers/video/geode/gx1fb_core.c
6e9df6a3
MT
39530@@ -29,7 +29,7 @@ static int crt_option = 1;
39531 static char panel_option[32] = "";
39532
39533 /* Modes relevant to the GX1 (taken from modedb.c) */
39534-static const struct fb_videomode __devinitdata gx1_modedb[] = {
39535+static const struct fb_videomode __devinitconst gx1_modedb[] = {
39536 /* 640x480-60 VESA */
39537 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39538 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
fe2de317 39539diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
4c928ab7 39540index 0fad23f..0e9afa4 100644
fe2de317
MT
39541--- a/drivers/video/gxt4500.c
39542+++ b/drivers/video/gxt4500.c
6e9df6a3
MT
39543@@ -156,7 +156,7 @@ struct gxt4500_par {
39544 static char *mode_option;
39545
39546 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39547-static const struct fb_videomode defaultmode __devinitdata = {
39548+static const struct fb_videomode defaultmode __devinitconst = {
39549 .refresh = 60,
39550 .xres = 1280,
39551 .yres = 1024,
fe2de317 39552@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
6e9df6a3
MT
39553 return 0;
39554 }
39555
39556-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39557+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39558 .id = "IBM GXT4500P",
39559 .type = FB_TYPE_PACKED_PIXELS,
39560 .visual = FB_VISUAL_PSEUDOCOLOR,
fe2de317
MT
39561diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39562index 7672d2e..b56437f 100644
39563--- a/drivers/video/i810/i810_accel.c
39564+++ b/drivers/video/i810/i810_accel.c
39565@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
58c5fc13
MT
39566 }
39567 }
39568 printk("ringbuffer lockup!!!\n");
39569+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39570 i810_report_error(mmio);
39571 par->dev_flags |= LOCKUP;
39572 info->pixmap.scan_align = 1;
fe2de317 39573diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
5e856224 39574index b83f361..2b05a91 100644
fe2de317
MT
39575--- a/drivers/video/i810/i810_main.c
39576+++ b/drivers/video/i810/i810_main.c
39577@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
6e9df6a3
MT
39578 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39579
39580 /* PCI */
39581-static const char *i810_pci_list[] __devinitdata = {
39582+static const char *i810_pci_list[] __devinitconst = {
39583 "Intel(R) 810 Framebuffer Device" ,
39584 "Intel(R) 810-DC100 Framebuffer Device" ,
39585 "Intel(R) 810E Framebuffer Device" ,
fe2de317
MT
39586diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39587index de36693..3c63fc2 100644
39588--- a/drivers/video/jz4740_fb.c
39589+++ b/drivers/video/jz4740_fb.c
6e9df6a3
MT
39590@@ -136,7 +136,7 @@ struct jzfb {
39591 uint32_t pseudo_palette[16];
39592 };
39593
39594-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39595+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39596 .id = "JZ4740 FB",
39597 .type = FB_TYPE_PACKED_PIXELS,
39598 .visual = FB_VISUAL_TRUECOLOR,
fe2de317
MT
39599diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39600index 3c14e43..eafa544 100644
39601--- a/drivers/video/logo/logo_linux_clut224.ppm
39602+++ b/drivers/video/logo/logo_linux_clut224.ppm
15a11c5b
MT
39603@@ -1,1604 +1,1123 @@
39604 P3
39605-# Standard 224-color Linux logo
39606 80 80
39607 255
 [ remainder of hunk: the deleted rows of raw PPM pixel values for the original 80x80, 224-color Linux logo are not reproduced in this view; per the hunk header (-1,1604 +1,1123) the patch removes this pixel data and substitutes its own replacement image data ]
40626- 0 0 0 0 0 0 0 0 0 0 0 0
40627- 0 0 0 0 0 0 0 0 0 0 0 0
40628- 0 0 0 0 0 0 0 0 0 0 0 0
40629- 0 0 0 0 0 0 0 0 0 0 0 0
40630- 0 0 0 0 0 0 0 0 0 0 0 0
40631- 0 0 0 0 0 0 6 6 6 18 18 18
40632- 50 50 50 74 74 74 2 2 6 2 2 6
40633- 14 14 14 70 70 70 34 34 34 62 62 62
40634-250 250 250 253 253 253 253 253 253 253 253 253
40635-253 253 253 253 253 253 253 253 253 253 253 253
40636-253 253 253 253 253 253 231 231 231 246 246 246
40637-253 253 253 253 253 253 253 253 253 253 253 253
40638-253 253 253 253 253 253 253 253 253 253 253 253
40639-253 253 253 253 253 253 253 253 253 253 253 253
40640-253 253 253 253 253 253 253 253 253 253 253 253
40641-253 253 253 253 253 253 234 234 234 14 14 14
40642- 2 2 6 2 2 6 30 30 30 2 2 6
40643- 2 2 6 2 2 6 2 2 6 2 2 6
40644- 2 2 6 66 66 66 62 62 62 22 22 22
40645- 6 6 6 0 0 0 0 0 0 0 0 0
40646- 0 0 0 0 0 0 0 0 0 0 0 0
40647- 0 0 0 0 0 0 0 0 0 0 0 0
40648- 0 0 0 0 0 0 0 0 0 0 0 0
40649- 0 0 0 0 0 0 0 0 0 0 0 0
40650- 0 0 0 0 0 0 0 0 0 0 0 0
40651- 0 0 0 0 0 0 6 6 6 18 18 18
40652- 54 54 54 62 62 62 2 2 6 2 2 6
40653- 2 2 6 30 30 30 46 46 46 70 70 70
40654-250 250 250 253 253 253 253 253 253 253 253 253
40655-253 253 253 253 253 253 253 253 253 253 253 253
40656-253 253 253 253 253 253 231 231 231 246 246 246
40657-253 253 253 253 253 253 253 253 253 253 253 253
40658-253 253 253 253 253 253 253 253 253 253 253 253
40659-253 253 253 253 253 253 253 253 253 253 253 253
40660-253 253 253 253 253 253 253 253 253 253 253 253
40661-253 253 253 253 253 253 226 226 226 10 10 10
40662- 2 2 6 6 6 6 30 30 30 2 2 6
40663- 2 2 6 2 2 6 2 2 6 2 2 6
40664- 2 2 6 66 66 66 58 58 58 22 22 22
40665- 6 6 6 0 0 0 0 0 0 0 0 0
40666- 0 0 0 0 0 0 0 0 0 0 0 0
40667- 0 0 0 0 0 0 0 0 0 0 0 0
40668- 0 0 0 0 0 0 0 0 0 0 0 0
40669- 0 0 0 0 0 0 0 0 0 0 0 0
40670- 0 0 0 0 0 0 0 0 0 0 0 0
40671- 0 0 0 0 0 0 6 6 6 22 22 22
40672- 58 58 58 62 62 62 2 2 6 2 2 6
40673- 2 2 6 2 2 6 30 30 30 78 78 78
40674-250 250 250 253 253 253 253 253 253 253 253 253
40675-253 253 253 253 253 253 253 253 253 253 253 253
40676-253 253 253 253 253 253 231 231 231 246 246 246
40677-253 253 253 253 253 253 253 253 253 253 253 253
40678-253 253 253 253 253 253 253 253 253 253 253 253
40679-253 253 253 253 253 253 253 253 253 253 253 253
40680-253 253 253 253 253 253 253 253 253 253 253 253
40681-253 253 253 253 253 253 206 206 206 2 2 6
40682- 22 22 22 34 34 34 18 14 6 22 22 22
40683- 26 26 26 18 18 18 6 6 6 2 2 6
40684- 2 2 6 82 82 82 54 54 54 18 18 18
40685- 6 6 6 0 0 0 0 0 0 0 0 0
40686- 0 0 0 0 0 0 0 0 0 0 0 0
40687- 0 0 0 0 0 0 0 0 0 0 0 0
40688- 0 0 0 0 0 0 0 0 0 0 0 0
40689- 0 0 0 0 0 0 0 0 0 0 0 0
40690- 0 0 0 0 0 0 0 0 0 0 0 0
40691- 0 0 0 0 0 0 6 6 6 26 26 26
40692- 62 62 62 106 106 106 74 54 14 185 133 11
40693-210 162 10 121 92 8 6 6 6 62 62 62
40694-238 238 238 253 253 253 253 253 253 253 253 253
40695-253 253 253 253 253 253 253 253 253 253 253 253
40696-253 253 253 253 253 253 231 231 231 246 246 246
40697-253 253 253 253 253 253 253 253 253 253 253 253
40698-253 253 253 253 253 253 253 253 253 253 253 253
40699-253 253 253 253 253 253 253 253 253 253 253 253
40700-253 253 253 253 253 253 253 253 253 253 253 253
40701-253 253 253 253 253 253 158 158 158 18 18 18
40702- 14 14 14 2 2 6 2 2 6 2 2 6
40703- 6 6 6 18 18 18 66 66 66 38 38 38
40704- 6 6 6 94 94 94 50 50 50 18 18 18
40705- 6 6 6 0 0 0 0 0 0 0 0 0
40706- 0 0 0 0 0 0 0 0 0 0 0 0
40707- 0 0 0 0 0 0 0 0 0 0 0 0
40708- 0 0 0 0 0 0 0 0 0 0 0 0
40709- 0 0 0 0 0 0 0 0 0 0 0 0
40710- 0 0 0 0 0 0 0 0 0 6 6 6
40711- 10 10 10 10 10 10 18 18 18 38 38 38
40712- 78 78 78 142 134 106 216 158 10 242 186 14
40713-246 190 14 246 190 14 156 118 10 10 10 10
40714- 90 90 90 238 238 238 253 253 253 253 253 253
40715-253 253 253 253 253 253 253 253 253 253 253 253
40716-253 253 253 253 253 253 231 231 231 250 250 250
40717-253 253 253 253 253 253 253 253 253 253 253 253
40718-253 253 253 253 253 253 253 253 253 253 253 253
40719-253 253 253 253 253 253 253 253 253 253 253 253
40720-253 253 253 253 253 253 253 253 253 246 230 190
40721-238 204 91 238 204 91 181 142 44 37 26 9
40722- 2 2 6 2 2 6 2 2 6 2 2 6
40723- 2 2 6 2 2 6 38 38 38 46 46 46
40724- 26 26 26 106 106 106 54 54 54 18 18 18
40725- 6 6 6 0 0 0 0 0 0 0 0 0
40726- 0 0 0 0 0 0 0 0 0 0 0 0
40727- 0 0 0 0 0 0 0 0 0 0 0 0
40728- 0 0 0 0 0 0 0 0 0 0 0 0
40729- 0 0 0 0 0 0 0 0 0 0 0 0
40730- 0 0 0 6 6 6 14 14 14 22 22 22
40731- 30 30 30 38 38 38 50 50 50 70 70 70
40732-106 106 106 190 142 34 226 170 11 242 186 14
40733-246 190 14 246 190 14 246 190 14 154 114 10
40734- 6 6 6 74 74 74 226 226 226 253 253 253
40735-253 253 253 253 253 253 253 253 253 253 253 253
40736-253 253 253 253 253 253 231 231 231 250 250 250
40737-253 253 253 253 253 253 253 253 253 253 253 253
40738-253 253 253 253 253 253 253 253 253 253 253 253
40739-253 253 253 253 253 253 253 253 253 253 253 253
40740-253 253 253 253 253 253 253 253 253 228 184 62
40741-241 196 14 241 208 19 232 195 16 38 30 10
40742- 2 2 6 2 2 6 2 2 6 2 2 6
40743- 2 2 6 6 6 6 30 30 30 26 26 26
40744-203 166 17 154 142 90 66 66 66 26 26 26
40745- 6 6 6 0 0 0 0 0 0 0 0 0
40746- 0 0 0 0 0 0 0 0 0 0 0 0
40747- 0 0 0 0 0 0 0 0 0 0 0 0
40748- 0 0 0 0 0 0 0 0 0 0 0 0
40749- 0 0 0 0 0 0 0 0 0 0 0 0
40750- 6 6 6 18 18 18 38 38 38 58 58 58
40751- 78 78 78 86 86 86 101 101 101 123 123 123
40752-175 146 61 210 150 10 234 174 13 246 186 14
40753-246 190 14 246 190 14 246 190 14 238 190 10
40754-102 78 10 2 2 6 46 46 46 198 198 198
40755-253 253 253 253 253 253 253 253 253 253 253 253
40756-253 253 253 253 253 253 234 234 234 242 242 242
40757-253 253 253 253 253 253 253 253 253 253 253 253
40758-253 253 253 253 253 253 253 253 253 253 253 253
40759-253 253 253 253 253 253 253 253 253 253 253 253
40760-253 253 253 253 253 253 253 253 253 224 178 62
40761-242 186 14 241 196 14 210 166 10 22 18 6
40762- 2 2 6 2 2 6 2 2 6 2 2 6
40763- 2 2 6 2 2 6 6 6 6 121 92 8
40764-238 202 15 232 195 16 82 82 82 34 34 34
40765- 10 10 10 0 0 0 0 0 0 0 0 0
40766- 0 0 0 0 0 0 0 0 0 0 0 0
40767- 0 0 0 0 0 0 0 0 0 0 0 0
40768- 0 0 0 0 0 0 0 0 0 0 0 0
40769- 0 0 0 0 0 0 0 0 0 0 0 0
40770- 14 14 14 38 38 38 70 70 70 154 122 46
40771-190 142 34 200 144 11 197 138 11 197 138 11
40772-213 154 11 226 170 11 242 186 14 246 190 14
40773-246 190 14 246 190 14 246 190 14 246 190 14
40774-225 175 15 46 32 6 2 2 6 22 22 22
40775-158 158 158 250 250 250 253 253 253 253 253 253
40776-253 253 253 253 253 253 253 253 253 253 253 253
40777-253 253 253 253 253 253 253 253 253 253 253 253
40778-253 253 253 253 253 253 253 253 253 253 253 253
40779-253 253 253 253 253 253 253 253 253 253 253 253
40780-253 253 253 250 250 250 242 242 242 224 178 62
40781-239 182 13 236 186 11 213 154 11 46 32 6
40782- 2 2 6 2 2 6 2 2 6 2 2 6
40783- 2 2 6 2 2 6 61 42 6 225 175 15
40784-238 190 10 236 186 11 112 100 78 42 42 42
40785- 14 14 14 0 0 0 0 0 0 0 0 0
40786- 0 0 0 0 0 0 0 0 0 0 0 0
40787- 0 0 0 0 0 0 0 0 0 0 0 0
40788- 0 0 0 0 0 0 0 0 0 0 0 0
40789- 0 0 0 0 0 0 0 0 0 6 6 6
40790- 22 22 22 54 54 54 154 122 46 213 154 11
40791-226 170 11 230 174 11 226 170 11 226 170 11
40792-236 178 12 242 186 14 246 190 14 246 190 14
40793-246 190 14 246 190 14 246 190 14 246 190 14
40794-241 196 14 184 144 12 10 10 10 2 2 6
40795- 6 6 6 116 116 116 242 242 242 253 253 253
40796-253 253 253 253 253 253 253 253 253 253 253 253
40797-253 253 253 253 253 253 253 253 253 253 253 253
40798-253 253 253 253 253 253 253 253 253 253 253 253
40799-253 253 253 253 253 253 253 253 253 253 253 253
40800-253 253 253 231 231 231 198 198 198 214 170 54
40801-236 178 12 236 178 12 210 150 10 137 92 6
40802- 18 14 6 2 2 6 2 2 6 2 2 6
40803- 6 6 6 70 47 6 200 144 11 236 178 12
40804-239 182 13 239 182 13 124 112 88 58 58 58
40805- 22 22 22 6 6 6 0 0 0 0 0 0
40806- 0 0 0 0 0 0 0 0 0 0 0 0
40807- 0 0 0 0 0 0 0 0 0 0 0 0
40808- 0 0 0 0 0 0 0 0 0 0 0 0
40809- 0 0 0 0 0 0 0 0 0 10 10 10
40810- 30 30 30 70 70 70 180 133 36 226 170 11
40811-239 182 13 242 186 14 242 186 14 246 186 14
40812-246 190 14 246 190 14 246 190 14 246 190 14
40813-246 190 14 246 190 14 246 190 14 246 190 14
40814-246 190 14 232 195 16 98 70 6 2 2 6
40815- 2 2 6 2 2 6 66 66 66 221 221 221
40816-253 253 253 253 253 253 253 253 253 253 253 253
40817-253 253 253 253 253 253 253 253 253 253 253 253
40818-253 253 253 253 253 253 253 253 253 253 253 253
40819-253 253 253 253 253 253 253 253 253 253 253 253
40820-253 253 253 206 206 206 198 198 198 214 166 58
40821-230 174 11 230 174 11 216 158 10 192 133 9
40822-163 110 8 116 81 8 102 78 10 116 81 8
40823-167 114 7 197 138 11 226 170 11 239 182 13
40824-242 186 14 242 186 14 162 146 94 78 78 78
40825- 34 34 34 14 14 14 6 6 6 0 0 0
40826- 0 0 0 0 0 0 0 0 0 0 0 0
40827- 0 0 0 0 0 0 0 0 0 0 0 0
40828- 0 0 0 0 0 0 0 0 0 0 0 0
40829- 0 0 0 0 0 0 0 0 0 6 6 6
40830- 30 30 30 78 78 78 190 142 34 226 170 11
40831-239 182 13 246 190 14 246 190 14 246 190 14
40832-246 190 14 246 190 14 246 190 14 246 190 14
40833-246 190 14 246 190 14 246 190 14 246 190 14
40834-246 190 14 241 196 14 203 166 17 22 18 6
40835- 2 2 6 2 2 6 2 2 6 38 38 38
40836-218 218 218 253 253 253 253 253 253 253 253 253
40837-253 253 253 253 253 253 253 253 253 253 253 253
40838-253 253 253 253 253 253 253 253 253 253 253 253
40839-253 253 253 253 253 253 253 253 253 253 253 253
40840-250 250 250 206 206 206 198 198 198 202 162 69
40841-226 170 11 236 178 12 224 166 10 210 150 10
40842-200 144 11 197 138 11 192 133 9 197 138 11
40843-210 150 10 226 170 11 242 186 14 246 190 14
40844-246 190 14 246 186 14 225 175 15 124 112 88
40845- 62 62 62 30 30 30 14 14 14 6 6 6
40846- 0 0 0 0 0 0 0 0 0 0 0 0
40847- 0 0 0 0 0 0 0 0 0 0 0 0
40848- 0 0 0 0 0 0 0 0 0 0 0 0
40849- 0 0 0 0 0 0 0 0 0 10 10 10
40850- 30 30 30 78 78 78 174 135 50 224 166 10
40851-239 182 13 246 190 14 246 190 14 246 190 14
40852-246 190 14 246 190 14 246 190 14 246 190 14
40853-246 190 14 246 190 14 246 190 14 246 190 14
40854-246 190 14 246 190 14 241 196 14 139 102 15
40855- 2 2 6 2 2 6 2 2 6 2 2 6
40856- 78 78 78 250 250 250 253 253 253 253 253 253
40857-253 253 253 253 253 253 253 253 253 253 253 253
40858-253 253 253 253 253 253 253 253 253 253 253 253
40859-253 253 253 253 253 253 253 253 253 253 253 253
40860-250 250 250 214 214 214 198 198 198 190 150 46
40861-219 162 10 236 178 12 234 174 13 224 166 10
40862-216 158 10 213 154 11 213 154 11 216 158 10
40863-226 170 11 239 182 13 246 190 14 246 190 14
40864-246 190 14 246 190 14 242 186 14 206 162 42
40865-101 101 101 58 58 58 30 30 30 14 14 14
40866- 6 6 6 0 0 0 0 0 0 0 0 0
40867- 0 0 0 0 0 0 0 0 0 0 0 0
40868- 0 0 0 0 0 0 0 0 0 0 0 0
40869- 0 0 0 0 0 0 0 0 0 10 10 10
40870- 30 30 30 74 74 74 174 135 50 216 158 10
40871-236 178 12 246 190 14 246 190 14 246 190 14
40872-246 190 14 246 190 14 246 190 14 246 190 14
40873-246 190 14 246 190 14 246 190 14 246 190 14
40874-246 190 14 246 190 14 241 196 14 226 184 13
40875- 61 42 6 2 2 6 2 2 6 2 2 6
40876- 22 22 22 238 238 238 253 253 253 253 253 253
40877-253 253 253 253 253 253 253 253 253 253 253 253
40878-253 253 253 253 253 253 253 253 253 253 253 253
40879-253 253 253 253 253 253 253 253 253 253 253 253
40880-253 253 253 226 226 226 187 187 187 180 133 36
40881-216 158 10 236 178 12 239 182 13 236 178 12
40882-230 174 11 226 170 11 226 170 11 230 174 11
40883-236 178 12 242 186 14 246 190 14 246 190 14
40884-246 190 14 246 190 14 246 186 14 239 182 13
40885-206 162 42 106 106 106 66 66 66 34 34 34
40886- 14 14 14 6 6 6 0 0 0 0 0 0
40887- 0 0 0 0 0 0 0 0 0 0 0 0
40888- 0 0 0 0 0 0 0 0 0 0 0 0
40889- 0 0 0 0 0 0 0 0 0 6 6 6
40890- 26 26 26 70 70 70 163 133 67 213 154 11
40891-236 178 12 246 190 14 246 190 14 246 190 14
40892-246 190 14 246 190 14 246 190 14 246 190 14
40893-246 190 14 246 190 14 246 190 14 246 190 14
40894-246 190 14 246 190 14 246 190 14 241 196 14
40895-190 146 13 18 14 6 2 2 6 2 2 6
40896- 46 46 46 246 246 246 253 253 253 253 253 253
40897-253 253 253 253 253 253 253 253 253 253 253 253
40898-253 253 253 253 253 253 253 253 253 253 253 253
40899-253 253 253 253 253 253 253 253 253 253 253 253
40900-253 253 253 221 221 221 86 86 86 156 107 11
40901-216 158 10 236 178 12 242 186 14 246 186 14
40902-242 186 14 239 182 13 239 182 13 242 186 14
40903-242 186 14 246 186 14 246 190 14 246 190 14
40904-246 190 14 246 190 14 246 190 14 246 190 14
40905-242 186 14 225 175 15 142 122 72 66 66 66
40906- 30 30 30 10 10 10 0 0 0 0 0 0
40907- 0 0 0 0 0 0 0 0 0 0 0 0
40908- 0 0 0 0 0 0 0 0 0 0 0 0
40909- 0 0 0 0 0 0 0 0 0 6 6 6
40910- 26 26 26 70 70 70 163 133 67 210 150 10
40911-236 178 12 246 190 14 246 190 14 246 190 14
40912-246 190 14 246 190 14 246 190 14 246 190 14
40913-246 190 14 246 190 14 246 190 14 246 190 14
40914-246 190 14 246 190 14 246 190 14 246 190 14
40915-232 195 16 121 92 8 34 34 34 106 106 106
40916-221 221 221 253 253 253 253 253 253 253 253 253
40917-253 253 253 253 253 253 253 253 253 253 253 253
40918-253 253 253 253 253 253 253 253 253 253 253 253
40919-253 253 253 253 253 253 253 253 253 253 253 253
40920-242 242 242 82 82 82 18 14 6 163 110 8
40921-216 158 10 236 178 12 242 186 14 246 190 14
40922-246 190 14 246 190 14 246 190 14 246 190 14
40923-246 190 14 246 190 14 246 190 14 246 190 14
40924-246 190 14 246 190 14 246 190 14 246 190 14
40925-246 190 14 246 190 14 242 186 14 163 133 67
40926- 46 46 46 18 18 18 6 6 6 0 0 0
40927- 0 0 0 0 0 0 0 0 0 0 0 0
40928- 0 0 0 0 0 0 0 0 0 0 0 0
40929- 0 0 0 0 0 0 0 0 0 10 10 10
40930- 30 30 30 78 78 78 163 133 67 210 150 10
40931-236 178 12 246 186 14 246 190 14 246 190 14
40932-246 190 14 246 190 14 246 190 14 246 190 14
40933-246 190 14 246 190 14 246 190 14 246 190 14
40934-246 190 14 246 190 14 246 190 14 246 190 14
40935-241 196 14 215 174 15 190 178 144 253 253 253
40936-253 253 253 253 253 253 253 253 253 253 253 253
40937-253 253 253 253 253 253 253 253 253 253 253 253
40938-253 253 253 253 253 253 253 253 253 253 253 253
40939-253 253 253 253 253 253 253 253 253 218 218 218
40940- 58 58 58 2 2 6 22 18 6 167 114 7
40941-216 158 10 236 178 12 246 186 14 246 190 14
40942-246 190 14 246 190 14 246 190 14 246 190 14
40943-246 190 14 246 190 14 246 190 14 246 190 14
40944-246 190 14 246 190 14 246 190 14 246 190 14
40945-246 190 14 246 186 14 242 186 14 190 150 46
40946- 54 54 54 22 22 22 6 6 6 0 0 0
40947- 0 0 0 0 0 0 0 0 0 0 0 0
40948- 0 0 0 0 0 0 0 0 0 0 0 0
40949- 0 0 0 0 0 0 0 0 0 14 14 14
40950- 38 38 38 86 86 86 180 133 36 213 154 11
40951-236 178 12 246 186 14 246 190 14 246 190 14
40952-246 190 14 246 190 14 246 190 14 246 190 14
40953-246 190 14 246 190 14 246 190 14 246 190 14
40954-246 190 14 246 190 14 246 190 14 246 190 14
40955-246 190 14 232 195 16 190 146 13 214 214 214
40956-253 253 253 253 253 253 253 253 253 253 253 253
40957-253 253 253 253 253 253 253 253 253 253 253 253
40958-253 253 253 253 253 253 253 253 253 253 253 253
40959-253 253 253 250 250 250 170 170 170 26 26 26
40960- 2 2 6 2 2 6 37 26 9 163 110 8
40961-219 162 10 239 182 13 246 186 14 246 190 14
40962-246 190 14 246 190 14 246 190 14 246 190 14
40963-246 190 14 246 190 14 246 190 14 246 190 14
40964-246 190 14 246 190 14 246 190 14 246 190 14
40965-246 186 14 236 178 12 224 166 10 142 122 72
40966- 46 46 46 18 18 18 6 6 6 0 0 0
40967- 0 0 0 0 0 0 0 0 0 0 0 0
40968- 0 0 0 0 0 0 0 0 0 0 0 0
40969- 0 0 0 0 0 0 6 6 6 18 18 18
40970- 50 50 50 109 106 95 192 133 9 224 166 10
40971-242 186 14 246 190 14 246 190 14 246 190 14
40972-246 190 14 246 190 14 246 190 14 246 190 14
40973-246 190 14 246 190 14 246 190 14 246 190 14
40974-246 190 14 246 190 14 246 190 14 246 190 14
40975-242 186 14 226 184 13 210 162 10 142 110 46
40976-226 226 226 253 253 253 253 253 253 253 253 253
40977-253 253 253 253 253 253 253 253 253 253 253 253
40978-253 253 253 253 253 253 253 253 253 253 253 253
40979-198 198 198 66 66 66 2 2 6 2 2 6
40980- 2 2 6 2 2 6 50 34 6 156 107 11
40981-219 162 10 239 182 13 246 186 14 246 190 14
40982-246 190 14 246 190 14 246 190 14 246 190 14
40983-246 190 14 246 190 14 246 190 14 246 190 14
40984-246 190 14 246 190 14 246 190 14 242 186 14
40985-234 174 13 213 154 11 154 122 46 66 66 66
40986- 30 30 30 10 10 10 0 0 0 0 0 0
40987- 0 0 0 0 0 0 0 0 0 0 0 0
40988- 0 0 0 0 0 0 0 0 0 0 0 0
40989- 0 0 0 0 0 0 6 6 6 22 22 22
40990- 58 58 58 154 121 60 206 145 10 234 174 13
40991-242 186 14 246 186 14 246 190 14 246 190 14
40992-246 190 14 246 190 14 246 190 14 246 190 14
40993-246 190 14 246 190 14 246 190 14 246 190 14
40994-246 190 14 246 190 14 246 190 14 246 190 14
40995-246 186 14 236 178 12 210 162 10 163 110 8
40996- 61 42 6 138 138 138 218 218 218 250 250 250
40997-253 253 253 253 253 253 253 253 253 250 250 250
40998-242 242 242 210 210 210 144 144 144 66 66 66
40999- 6 6 6 2 2 6 2 2 6 2 2 6
41000- 2 2 6 2 2 6 61 42 6 163 110 8
41001-216 158 10 236 178 12 246 190 14 246 190 14
41002-246 190 14 246 190 14 246 190 14 246 190 14
41003-246 190 14 246 190 14 246 190 14 246 190 14
41004-246 190 14 239 182 13 230 174 11 216 158 10
41005-190 142 34 124 112 88 70 70 70 38 38 38
41006- 18 18 18 6 6 6 0 0 0 0 0 0
41007- 0 0 0 0 0 0 0 0 0 0 0 0
41008- 0 0 0 0 0 0 0 0 0 0 0 0
41009- 0 0 0 0 0 0 6 6 6 22 22 22
41010- 62 62 62 168 124 44 206 145 10 224 166 10
41011-236 178 12 239 182 13 242 186 14 242 186 14
41012-246 186 14 246 190 14 246 190 14 246 190 14
41013-246 190 14 246 190 14 246 190 14 246 190 14
41014-246 190 14 246 190 14 246 190 14 246 190 14
41015-246 190 14 236 178 12 216 158 10 175 118 6
41016- 80 54 7 2 2 6 6 6 6 30 30 30
41017- 54 54 54 62 62 62 50 50 50 38 38 38
41018- 14 14 14 2 2 6 2 2 6 2 2 6
41019- 2 2 6 2 2 6 2 2 6 2 2 6
41020- 2 2 6 6 6 6 80 54 7 167 114 7
41021-213 154 11 236 178 12 246 190 14 246 190 14
41022-246 190 14 246 190 14 246 190 14 246 190 14
41023-246 190 14 242 186 14 239 182 13 239 182 13
41024-230 174 11 210 150 10 174 135 50 124 112 88
41025- 82 82 82 54 54 54 34 34 34 18 18 18
41026- 6 6 6 0 0 0 0 0 0 0 0 0
41027- 0 0 0 0 0 0 0 0 0 0 0 0
41028- 0 0 0 0 0 0 0 0 0 0 0 0
41029- 0 0 0 0 0 0 6 6 6 18 18 18
41030- 50 50 50 158 118 36 192 133 9 200 144 11
41031-216 158 10 219 162 10 224 166 10 226 170 11
41032-230 174 11 236 178 12 239 182 13 239 182 13
41033-242 186 14 246 186 14 246 190 14 246 190 14
41034-246 190 14 246 190 14 246 190 14 246 190 14
41035-246 186 14 230 174 11 210 150 10 163 110 8
41036-104 69 6 10 10 10 2 2 6 2 2 6
41037- 2 2 6 2 2 6 2 2 6 2 2 6
41038- 2 2 6 2 2 6 2 2 6 2 2 6
41039- 2 2 6 2 2 6 2 2 6 2 2 6
41040- 2 2 6 6 6 6 91 60 6 167 114 7
41041-206 145 10 230 174 11 242 186 14 246 190 14
41042-246 190 14 246 190 14 246 186 14 242 186 14
41043-239 182 13 230 174 11 224 166 10 213 154 11
41044-180 133 36 124 112 88 86 86 86 58 58 58
41045- 38 38 38 22 22 22 10 10 10 6 6 6
41046- 0 0 0 0 0 0 0 0 0 0 0 0
41047- 0 0 0 0 0 0 0 0 0 0 0 0
41048- 0 0 0 0 0 0 0 0 0 0 0 0
41049- 0 0 0 0 0 0 0 0 0 14 14 14
41050- 34 34 34 70 70 70 138 110 50 158 118 36
41051-167 114 7 180 123 7 192 133 9 197 138 11
41052-200 144 11 206 145 10 213 154 11 219 162 10
41053-224 166 10 230 174 11 239 182 13 242 186 14
41054-246 186 14 246 186 14 246 186 14 246 186 14
41055-239 182 13 216 158 10 185 133 11 152 99 6
41056-104 69 6 18 14 6 2 2 6 2 2 6
41057- 2 2 6 2 2 6 2 2 6 2 2 6
41058- 2 2 6 2 2 6 2 2 6 2 2 6
41059- 2 2 6 2 2 6 2 2 6 2 2 6
41060- 2 2 6 6 6 6 80 54 7 152 99 6
41061-192 133 9 219 162 10 236 178 12 239 182 13
41062-246 186 14 242 186 14 239 182 13 236 178 12
41063-224 166 10 206 145 10 192 133 9 154 121 60
41064- 94 94 94 62 62 62 42 42 42 22 22 22
41065- 14 14 14 6 6 6 0 0 0 0 0 0
41066- 0 0 0 0 0 0 0 0 0 0 0 0
41067- 0 0 0 0 0 0 0 0 0 0 0 0
41068- 0 0 0 0 0 0 0 0 0 0 0 0
41069- 0 0 0 0 0 0 0 0 0 6 6 6
41070- 18 18 18 34 34 34 58 58 58 78 78 78
41071-101 98 89 124 112 88 142 110 46 156 107 11
41072-163 110 8 167 114 7 175 118 6 180 123 7
41073-185 133 11 197 138 11 210 150 10 219 162 10
41074-226 170 11 236 178 12 236 178 12 234 174 13
41075-219 162 10 197 138 11 163 110 8 130 83 6
41076- 91 60 6 10 10 10 2 2 6 2 2 6
41077- 18 18 18 38 38 38 38 38 38 38 38 38
41078- 38 38 38 38 38 38 38 38 38 38 38 38
41079- 38 38 38 38 38 38 26 26 26 2 2 6
41080- 2 2 6 6 6 6 70 47 6 137 92 6
41081-175 118 6 200 144 11 219 162 10 230 174 11
41082-234 174 13 230 174 11 219 162 10 210 150 10
41083-192 133 9 163 110 8 124 112 88 82 82 82
41084- 50 50 50 30 30 30 14 14 14 6 6 6
41085- 0 0 0 0 0 0 0 0 0 0 0 0
41086- 0 0 0 0 0 0 0 0 0 0 0 0
41087- 0 0 0 0 0 0 0 0 0 0 0 0
41088- 0 0 0 0 0 0 0 0 0 0 0 0
41089- 0 0 0 0 0 0 0 0 0 0 0 0
41090- 6 6 6 14 14 14 22 22 22 34 34 34
41091- 42 42 42 58 58 58 74 74 74 86 86 86
41092-101 98 89 122 102 70 130 98 46 121 87 25
41093-137 92 6 152 99 6 163 110 8 180 123 7
41094-185 133 11 197 138 11 206 145 10 200 144 11
41095-180 123 7 156 107 11 130 83 6 104 69 6
41096- 50 34 6 54 54 54 110 110 110 101 98 89
41097- 86 86 86 82 82 82 78 78 78 78 78 78
41098- 78 78 78 78 78 78 78 78 78 78 78 78
41099- 78 78 78 82 82 82 86 86 86 94 94 94
41100-106 106 106 101 101 101 86 66 34 124 80 6
41101-156 107 11 180 123 7 192 133 9 200 144 11
41102-206 145 10 200 144 11 192 133 9 175 118 6
41103-139 102 15 109 106 95 70 70 70 42 42 42
41104- 22 22 22 10 10 10 0 0 0 0 0 0
41105- 0 0 0 0 0 0 0 0 0 0 0 0
41106- 0 0 0 0 0 0 0 0 0 0 0 0
41107- 0 0 0 0 0 0 0 0 0 0 0 0
41108- 0 0 0 0 0 0 0 0 0 0 0 0
41109- 0 0 0 0 0 0 0 0 0 0 0 0
41110- 0 0 0 0 0 0 6 6 6 10 10 10
41111- 14 14 14 22 22 22 30 30 30 38 38 38
41112- 50 50 50 62 62 62 74 74 74 90 90 90
41113-101 98 89 112 100 78 121 87 25 124 80 6
41114-137 92 6 152 99 6 152 99 6 152 99 6
41115-138 86 6 124 80 6 98 70 6 86 66 30
41116-101 98 89 82 82 82 58 58 58 46 46 46
41117- 38 38 38 34 34 34 34 34 34 34 34 34
41118- 34 34 34 34 34 34 34 34 34 34 34 34
41119- 34 34 34 34 34 34 38 38 38 42 42 42
41120- 54 54 54 82 82 82 94 86 76 91 60 6
41121-134 86 6 156 107 11 167 114 7 175 118 6
41122-175 118 6 167 114 7 152 99 6 121 87 25
41123-101 98 89 62 62 62 34 34 34 18 18 18
41124- 6 6 6 0 0 0 0 0 0 0 0 0
41125- 0 0 0 0 0 0 0 0 0 0 0 0
41126- 0 0 0 0 0 0 0 0 0 0 0 0
41127- 0 0 0 0 0 0 0 0 0 0 0 0
41128- 0 0 0 0 0 0 0 0 0 0 0 0
41129- 0 0 0 0 0 0 0 0 0 0 0 0
41130- 0 0 0 0 0 0 0 0 0 0 0 0
41131- 0 0 0 6 6 6 6 6 6 10 10 10
41132- 18 18 18 22 22 22 30 30 30 42 42 42
41133- 50 50 50 66 66 66 86 86 86 101 98 89
41134-106 86 58 98 70 6 104 69 6 104 69 6
41135-104 69 6 91 60 6 82 62 34 90 90 90
41136- 62 62 62 38 38 38 22 22 22 14 14 14
41137- 10 10 10 10 10 10 10 10 10 10 10 10
41138- 10 10 10 10 10 10 6 6 6 10 10 10
41139- 10 10 10 10 10 10 10 10 10 14 14 14
41140- 22 22 22 42 42 42 70 70 70 89 81 66
41141- 80 54 7 104 69 6 124 80 6 137 92 6
41142-134 86 6 116 81 8 100 82 52 86 86 86
41143- 58 58 58 30 30 30 14 14 14 6 6 6
41144- 0 0 0 0 0 0 0 0 0 0 0 0
41145- 0 0 0 0 0 0 0 0 0 0 0 0
41146- 0 0 0 0 0 0 0 0 0 0 0 0
41147- 0 0 0 0 0 0 0 0 0 0 0 0
41148- 0 0 0 0 0 0 0 0 0 0 0 0
41149- 0 0 0 0 0 0 0 0 0 0 0 0
41150- 0 0 0 0 0 0 0 0 0 0 0 0
41151- 0 0 0 0 0 0 0 0 0 0 0 0
41152- 0 0 0 6 6 6 10 10 10 14 14 14
41153- 18 18 18 26 26 26 38 38 38 54 54 54
41154- 70 70 70 86 86 86 94 86 76 89 81 66
41155- 89 81 66 86 86 86 74 74 74 50 50 50
41156- 30 30 30 14 14 14 6 6 6 0 0 0
41157- 0 0 0 0 0 0 0 0 0 0 0 0
41158- 0 0 0 0 0 0 0 0 0 0 0 0
41159- 0 0 0 0 0 0 0 0 0 0 0 0
41160- 6 6 6 18 18 18 34 34 34 58 58 58
41161- 82 82 82 89 81 66 89 81 66 89 81 66
41162- 94 86 66 94 86 76 74 74 74 50 50 50
41163- 26 26 26 14 14 14 6 6 6 0 0 0
41164- 0 0 0 0 0 0 0 0 0 0 0 0
41165- 0 0 0 0 0 0 0 0 0 0 0 0
41166- 0 0 0 0 0 0 0 0 0 0 0 0
41167- 0 0 0 0 0 0 0 0 0 0 0 0
41168- 0 0 0 0 0 0 0 0 0 0 0 0
41169- 0 0 0 0 0 0 0 0 0 0 0 0
41170- 0 0 0 0 0 0 0 0 0 0 0 0
41171- 0 0 0 0 0 0 0 0 0 0 0 0
41172- 0 0 0 0 0 0 0 0 0 0 0 0
41173- 6 6 6 6 6 6 14 14 14 18 18 18
41174- 30 30 30 38 38 38 46 46 46 54 54 54
41175- 50 50 50 42 42 42 30 30 30 18 18 18
41176- 10 10 10 0 0 0 0 0 0 0 0 0
41177- 0 0 0 0 0 0 0 0 0 0 0 0
41178- 0 0 0 0 0 0 0 0 0 0 0 0
41179- 0 0 0 0 0 0 0 0 0 0 0 0
41180- 0 0 0 6 6 6 14 14 14 26 26 26
41181- 38 38 38 50 50 50 58 58 58 58 58 58
41182- 54 54 54 42 42 42 30 30 30 18 18 18
41183- 10 10 10 0 0 0 0 0 0 0 0 0
41184- 0 0 0 0 0 0 0 0 0 0 0 0
41185- 0 0 0 0 0 0 0 0 0 0 0 0
41186- 0 0 0 0 0 0 0 0 0 0 0 0
41187- 0 0 0 0 0 0 0 0 0 0 0 0
41188- 0 0 0 0 0 0 0 0 0 0 0 0
41189- 0 0 0 0 0 0 0 0 0 0 0 0
41190- 0 0 0 0 0 0 0 0 0 0 0 0
41191- 0 0 0 0 0 0 0 0 0 0 0 0
41192- 0 0 0 0 0 0 0 0 0 0 0 0
41193- 0 0 0 0 0 0 0 0 0 6 6 6
41194- 6 6 6 10 10 10 14 14 14 18 18 18
41195- 18 18 18 14 14 14 10 10 10 6 6 6
41196- 0 0 0 0 0 0 0 0 0 0 0 0
41197- 0 0 0 0 0 0 0 0 0 0 0 0
41198- 0 0 0 0 0 0 0 0 0 0 0 0
41199- 0 0 0 0 0 0 0 0 0 0 0 0
41200- 0 0 0 0 0 0 0 0 0 6 6 6
41201- 14 14 14 18 18 18 22 22 22 22 22 22
41202- 18 18 18 14 14 14 10 10 10 6 6 6
41203- 0 0 0 0 0 0 0 0 0 0 0 0
41204- 0 0 0 0 0 0 0 0 0 0 0 0
41205- 0 0 0 0 0 0 0 0 0 0 0 0
41206- 0 0 0 0 0 0 0 0 0 0 0 0
41207- 0 0 0 0 0 0 0 0 0 0 0 0
41208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221+4 4 4 4 4 4
41222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41235+4 4 4 4 4 4
41236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249+4 4 4 4 4 4
41250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263+4 4 4 4 4 4
41264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277+4 4 4 4 4 4
41278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291+4 4 4 4 4 4
41292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
41297+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
41298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41301+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41302+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41303+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305+4 4 4 4 4 4
41306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41311+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41312+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41316+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41317+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41318+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319+4 4 4 4 4 4
41320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41325+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41326+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41330+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41331+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41332+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41333+4 4 4 4 4 4
41334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41338+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41339+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41340+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41343+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41344+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41345+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41346+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41347+4 4 4 4 4 4
41348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41352+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41353+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41354+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41355+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41356+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41357+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41358+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41359+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41360+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41361+4 4 4 4 4 4
41362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41365+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41366+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41367+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41368+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41369+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41370+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41371+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41372+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41373+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41374+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41375+4 4 4 4 4 4
41376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41378+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41379+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41380+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41381+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41382+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41383+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41384+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41385+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41386+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41387+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41388+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41389+4 4 4 4 4 4
41390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41392+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41393+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41394+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41395+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41396+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41397+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41398+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41399+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41400+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41401+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41402+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41403+4 4 4 4 4 4
41404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41406+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41407+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41408+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41409+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41410+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41411+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41412+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41413+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41414+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41415+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41416+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41417+4 4 4 4 4 4
41418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41420+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41421+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41422+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41423+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41424+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41425+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41426+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41427+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41428+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41429+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41430+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41431+4 4 4 4 4 4
41432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41433+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41434+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41435+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41436+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41437+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41438+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41439+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41440+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41441+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41442+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41443+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41444+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41445+4 4 4 4 4 4
41446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41447+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41448+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41449+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41450+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41451+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41452+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41453+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41454+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41455+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41456+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41457+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41458+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41459+0 0 0 4 4 4
41460+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41461+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41462+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41463+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41464+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41465+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41466+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41467+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41468+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41469+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41470+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41471+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41472+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41473+2 0 0 0 0 0
41474+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41475+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41476+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41477+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41478+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41479+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41480+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41481+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41482+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41483+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41484+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41485+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41486+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41487+37 38 37 0 0 0
41488+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41489+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41490+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41491+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41492+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41493+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41494+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41495+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41496+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41497+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41498+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41499+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41500+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41501+85 115 134 4 0 0
41502+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41503+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41504+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41505+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41506+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41507+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41508+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41509+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41510+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41511+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41512+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41513+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41514+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41515+60 73 81 4 0 0
41516+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41517+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41518+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41519+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41520+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41521+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41522+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41523+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41524+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41525+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41526+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41527+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41528+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41529+16 19 21 4 0 0
41530+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41531+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41532+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41533+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41534+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41535+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41536+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41537+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41538+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41539+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41540+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41541+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41542+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41543+4 0 0 4 3 3
41544+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41545+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41546+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41548+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41549+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41550+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41551+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41552+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41553+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41554+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41555+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41556+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41557+3 2 2 4 4 4
41558+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41559+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41560+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41561+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41562+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41563+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41564+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41565+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41566+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41567+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41568+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41569+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41570+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41571+4 4 4 4 4 4
41572+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41573+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41574+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41575+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41576+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41577+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41578+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41579+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41580+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41581+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41582+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41583+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41584+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41585+4 4 4 4 4 4
41586+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41587+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41588+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41589+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41590+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41591+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41592+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41593+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41594+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41595+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41596+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41597+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41598+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41599+5 5 5 5 5 5
41600+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41601+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41602+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41603+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41604+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41605+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41606+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41607+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41608+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41609+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41610+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41611+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41612+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41613+5 5 5 4 4 4
41614+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41615+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41616+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41617+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41618+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41619+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41620+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41621+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41622+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41623+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41624+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41625+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41627+4 4 4 4 4 4
41628+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41629+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41630+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41631+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41632+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41633+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41634+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41635+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41636+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41637+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41638+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41639+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41641+4 4 4 4 4 4
41642+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41643+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41644+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41645+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41646+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41647+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41648+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41649+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41650+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41651+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41652+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41655+4 4 4 4 4 4
41656+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41657+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41658+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41659+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41660+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41661+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41662+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41663+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41664+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41665+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41666+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41669+4 4 4 4 4 4
41670+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41671+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41672+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41673+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41674+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41675+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41676+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41677+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41678+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
[... patch lines 41679-42327 omitted: remaining ASCII RGB pixel rows (PPM image data) added by this hunk of the patch ...]
fe2de317 42328diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
5e856224 42329index a40c05e..785c583 100644
fe2de317
MT
42330--- a/drivers/video/udlfb.c
42331+++ b/drivers/video/udlfb.c
4c928ab7 42332@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
8308f9c9
MT
42333 dlfb_urb_completion(urb);
42334
42335 error:
42336- atomic_add(bytes_sent, &dev->bytes_sent);
42337- atomic_add(bytes_identical, &dev->bytes_identical);
42338- atomic_add(width*height*2, &dev->bytes_rendered);
42339+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42340+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42341+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
42342 end_cycles = get_cycles();
42343- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42344+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42345 >> 10)), /* Kcycles */
42346 &dev->cpu_kcycles_used);
42347
4c928ab7 42348@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
8308f9c9
MT
42349 dlfb_urb_completion(urb);
42350
42351 error:
42352- atomic_add(bytes_sent, &dev->bytes_sent);
42353- atomic_add(bytes_identical, &dev->bytes_identical);
42354- atomic_add(bytes_rendered, &dev->bytes_rendered);
42355+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42356+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42357+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
42358 end_cycles = get_cycles();
42359- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42360+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42361 >> 10)), /* Kcycles */
42362 &dev->cpu_kcycles_used);
42363 }
4c928ab7 42364@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
8308f9c9
MT
42365 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42366 struct dlfb_data *dev = fb_info->par;
42367 return snprintf(buf, PAGE_SIZE, "%u\n",
42368- atomic_read(&dev->bytes_rendered));
42369+ atomic_read_unchecked(&dev->bytes_rendered));
42370 }
42371
42372 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
4c928ab7 42373@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
8308f9c9
MT
42374 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42375 struct dlfb_data *dev = fb_info->par;
42376 return snprintf(buf, PAGE_SIZE, "%u\n",
42377- atomic_read(&dev->bytes_identical));
42378+ atomic_read_unchecked(&dev->bytes_identical));
42379 }
42380
42381 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
4c928ab7 42382@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
8308f9c9
MT
42383 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42384 struct dlfb_data *dev = fb_info->par;
42385 return snprintf(buf, PAGE_SIZE, "%u\n",
42386- atomic_read(&dev->bytes_sent));
42387+ atomic_read_unchecked(&dev->bytes_sent));
42388 }
42389
42390 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
4c928ab7 42391@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
8308f9c9
MT
42392 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42393 struct dlfb_data *dev = fb_info->par;
42394 return snprintf(buf, PAGE_SIZE, "%u\n",
42395- atomic_read(&dev->cpu_kcycles_used));
42396+ atomic_read_unchecked(&dev->cpu_kcycles_used));
42397 }
42398
42399 static ssize_t edid_show(
4c928ab7 42400@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
8308f9c9
MT
42401 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42402 struct dlfb_data *dev = fb_info->par;
42403
42404- atomic_set(&dev->bytes_rendered, 0);
42405- atomic_set(&dev->bytes_identical, 0);
42406- atomic_set(&dev->bytes_sent, 0);
42407- atomic_set(&dev->cpu_kcycles_used, 0);
42408+ atomic_set_unchecked(&dev->bytes_rendered, 0);
42409+ atomic_set_unchecked(&dev->bytes_identical, 0);
42410+ atomic_set_unchecked(&dev->bytes_sent, 0);
42411+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42412
42413 return count;
42414 }
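
The udlfb conversion above is an instance of a pattern applied throughout this patch: under PaX's REFCOUNT hardening, atomic_t arithmetic is checked for overflow, so counters that are purely statistical and are allowed to wrap are moved to the atomic_unchecked_t type and its *_unchecked helpers that the patch introduces in the atomic headers. A minimal sketch of that split, assuming a kernel with this patch applied; the structure and function names below are invented for illustration:

    #include <linux/atomic.h>    /* atomic_unchecked_t comes from the patched atomic headers */

    struct dlfb_stats_sketch {                       /* hypothetical example structure */
            atomic_unchecked_t bytes_sent;           /* statistic: wrap-around is harmless */
            atomic_t           ref;                  /* real reference count: keep it checked */
    };

    static void dlfb_stats_sketch_update(struct dlfb_stats_sketch *s, int sent)
    {
            atomic_add_unchecked(sent, &s->bytes_sent);  /* exempt from overflow detection */
            atomic_inc(&s->ref);                         /* an overflow here would be a bug */
    }

The matching atomic_read_unchecked()/atomic_set_unchecked() calls in the sysfs show and reset handlers above keep the accessors consistent with the new counter type.
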
fe2de317 42415diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
5e856224 42416index 8408543..d6f20f1 100644
fe2de317
MT
42417--- a/drivers/video/uvesafb.c
42418+++ b/drivers/video/uvesafb.c
df50ba0c 42419@@ -19,6 +19,7 @@
58c5fc13
MT
42420 #include <linux/io.h>
42421 #include <linux/mutex.h>
df50ba0c 42422 #include <linux/slab.h>
58c5fc13
MT
42423+#include <linux/moduleloader.h>
42424 #include <video/edid.h>
42425 #include <video/uvesafb.h>
42426 #ifdef CONFIG_X86
5e856224
MT
42427@@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
42428 struct uvesafb_task *utask;
42429 struct uvesafb_ktask *task;
42430
42431- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
42432+ if (!capable(CAP_SYS_ADMIN))
42433 return;
42434
42435 if (msg->seq >= UVESAFB_TASKS_MAX)
df50ba0c 42436@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
58c5fc13
MT
42437 NULL,
42438 };
42439
42440- return call_usermodehelper(v86d_path, argv, envp, 1);
42441+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
42442 }
42443
42444 /*
fe2de317 42445@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
58c5fc13
MT
42446 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42447 par->pmi_setpal = par->ypan = 0;
42448 } else {
42449+
42450+#ifdef CONFIG_PAX_KERNEXEC
42451+#ifdef CONFIG_MODULES
58c5fc13
MT
42452+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42453+#endif
42454+ if (!par->pmi_code) {
42455+ par->pmi_setpal = par->ypan = 0;
42456+ return 0;
42457+ }
42458+#endif
42459+
42460 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42461 + task->t.regs.edi);
42462+
42463+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
ae4e228f 42464+ pax_open_kernel();
58c5fc13 42465+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
ae4e228f 42466+ pax_close_kernel();
58c5fc13
MT
42467+
42468+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42469+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42470+#else
42471 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42472 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42473+#endif
42474+
42475 printk(KERN_INFO "uvesafb: protected mode interface info at "
42476 "%04x:%04x\n",
42477 (u16)task->t.regs.es, (u16)task->t.regs.edi);
5e856224
MT
42478@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42479 par->ypan = ypan;
42480
42481 if (par->pmi_setpal || par->ypan) {
42482+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42483 if (__supported_pte_mask & _PAGE_NX) {
42484 par->pmi_setpal = par->ypan = 0;
42485 printk(KERN_WARNING "uvesafb: NX protection is actively."
42486 "We have better not to use the PMI.\n");
42487- } else {
42488+ } else
42489+#endif
42490 uvesafb_vbe_getpmi(task, par);
42491- }
42492 }
42493 #else
42494 /* The protected mode interface is not available on non-x86. */
42495@@ -1828,6 +1852,11 @@ out:
58c5fc13
MT
42496 if (par->vbe_modes)
42497 kfree(par->vbe_modes);
42498
42499+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42500+ if (par->pmi_code)
42501+ module_free_exec(NULL, par->pmi_code);
42502+#endif
42503+
42504 framebuffer_release(info);
42505 return err;
42506 }
5e856224 42507@@ -1854,6 +1883,12 @@ static int uvesafb_remove(struct platform_device *dev)
58c5fc13
MT
42508 kfree(par->vbe_state_orig);
42509 if (par->vbe_state_saved)
42510 kfree(par->vbe_state_saved);
42511+
42512+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42513+ if (par->pmi_code)
42514+ module_free_exec(NULL, par->pmi_code);
42515+#endif
42516+
42517 }
42518
42519 framebuffer_release(info);
fe2de317
MT
42520diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42521index 501b340..86bd4cf 100644
42522--- a/drivers/video/vesafb.c
42523+++ b/drivers/video/vesafb.c
58c5fc13
MT
42524@@ -9,6 +9,7 @@
42525 */
42526
42527 #include <linux/module.h>
42528+#include <linux/moduleloader.h>
42529 #include <linux/kernel.h>
42530 #include <linux/errno.h>
42531 #include <linux/string.h>
fe2de317 42532@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
58c5fc13
MT
42533 static int vram_total __initdata; /* Set total amount of memory */
42534 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42535 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42536-static void (*pmi_start)(void) __read_mostly;
42537-static void (*pmi_pal) (void) __read_mostly;
42538+static void (*pmi_start)(void) __read_only;
42539+static void (*pmi_pal) (void) __read_only;
42540 static int depth __read_mostly;
42541 static int vga_compat __read_mostly;
42542 /* --------------------------------------------------------------------- */
fe2de317 42543@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
58c5fc13
MT
42544 unsigned int size_vmode;
42545 unsigned int size_remap;
42546 unsigned int size_total;
42547+ void *pmi_code = NULL;
42548
42549 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42550 return -ENODEV;
fe2de317 42551@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
58c5fc13
MT
42552 size_remap = size_total;
42553 vesafb_fix.smem_len = size_remap;
42554
42555-#ifndef __i386__
42556- screen_info.vesapm_seg = 0;
42557-#endif
42558-
42559 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42560 printk(KERN_WARNING
42561 "vesafb: cannot reserve video memory at 0x%lx\n",
fe2de317 42562@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
58c5fc13
MT
42563 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42564 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42565
42566+#ifdef __i386__
42567+
42568+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42569+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
42570+ if (!pmi_code)
42571+#elif !defined(CONFIG_PAX_KERNEXEC)
42572+ if (0)
42573+#endif
42574+
42575+#endif
42576+ screen_info.vesapm_seg = 0;
42577+
42578 if (screen_info.vesapm_seg) {
42579- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42580- screen_info.vesapm_seg,screen_info.vesapm_off);
42581+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42582+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42583 }
42584
42585 if (screen_info.vesapm_seg < 0xc000)
fe2de317 42586@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
58c5fc13
MT
42587
42588 if (ypan || pmi_setpal) {
42589 unsigned short *pmi_base;
15a11c5b
MT
42590+
42591 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
58c5fc13
MT
42592- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42593- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42594+
58c5fc13 42595+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
ae4e228f 42596+ pax_open_kernel();
58c5fc13
MT
42597+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42598+#else
15a11c5b 42599+ pmi_code = pmi_base;
58c5fc13
MT
42600+#endif
42601+
42602+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42603+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42604+
42605+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42606+ pmi_start = ktva_ktla(pmi_start);
42607+ pmi_pal = ktva_ktla(pmi_pal);
ae4e228f 42608+ pax_close_kernel();
58c5fc13
MT
42609+#endif
42610+
42611 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42612 if (pmi_base[3]) {
42613 printk(KERN_INFO "vesafb: pmi: ports = ");
fe2de317 42614@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
58c5fc13
MT
42615 info->node, info->fix.id);
42616 return 0;
42617 err:
42618+
42619+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42620+ module_free_exec(NULL, pmi_code);
42621+#endif
42622+
42623 if (info->screen_base)
42624 iounmap(info->screen_base);
42625 framebuffer_release(info);
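
The uvesafb and vesafb hunks above address the same KERNEXEC constraint: with CONFIG_PAX_KERNEXEC the BIOS protected-mode interface (PMI) code cannot be executed in place from the directly mapped copy, so the drivers allocate an executable region with module_alloc_exec(), copy the PMI code into it between pax_open_kernel() and pax_close_kernel(), and call it through pointers translated with ktva_ktla(); the module_free_exec() calls in the error and remove paths release that allocation again. A condensed sketch of the sequence, assuming the PaX helpers added elsewhere in this patch (the wrapper name is invented and error handling is trimmed):

    static void *pmi_copy_sketch(const void *pmi_base, size_t size)
    {
            void *code = module_alloc_exec(size);    /* executable allocation under KERNEXEC */

            if (!code)
                    return NULL;                     /* caller then clears pmi_setpal and ypan */

            pax_open_kernel();                       /* briefly allow writing the copy */
            memcpy(code, pmi_base, size);
            pax_close_kernel();

            return ktva_ktla(code);                  /* address the copied code is actually run from */
    }
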
fe2de317
MT
42626diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42627index 88714ae..16c2e11 100644
42628--- a/drivers/video/via/via_clock.h
42629+++ b/drivers/video/via/via_clock.h
15a11c5b
MT
42630@@ -56,7 +56,7 @@ struct via_clock {
42631
42632 void (*set_engine_pll_state)(u8 state);
42633 void (*set_engine_pll)(struct via_pll_config config);
42634-};
42635+} __no_const;
42636
42637
42638 static inline u32 get_pll_internal_frequency(u32 ref_freq,
fe2de317
MT
42639diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42640index e56c934..fc22f4b 100644
42641--- a/drivers/xen/xen-pciback/conf_space.h
42642+++ b/drivers/xen/xen-pciback/conf_space.h
6e9df6a3
MT
42643@@ -44,15 +44,15 @@ struct config_field {
42644 struct {
42645 conf_dword_write write;
42646 conf_dword_read read;
42647- } dw;
42648+ } __no_const dw;
42649 struct {
42650 conf_word_write write;
42651 conf_word_read read;
42652- } w;
42653+ } __no_const w;
42654 struct {
42655 conf_byte_write write;
42656 conf_byte_read read;
42657- } b;
42658+ } __no_const b;
42659 } u;
42660 struct list_head list;
42661 };
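
The __no_const annotations in via_clock.h and conf_space.h above tie into grsecurity's constify GCC plugin, which makes structures consisting only of function pointers read-only by default; ops structures whose pointers are legitimately assigned at run time are tagged __no_const to opt out of that treatment. A small sketch of the idea, assuming a kernel built with this patch (the structure and helper below are invented for illustration):

    struct hook_ops_sketch {                         /* all function pointers: a constify candidate */
            int  (*read)(void *priv);
            void (*write)(void *priv, int val);
    } __no_const;                                    /* opted out because it is filled in at run time */

    static struct hook_ops_sketch hooks_sketch;

    static void hook_ops_sketch_register(int (*read)(void *), void (*write)(void *, int))
    {
            hooks_sketch.read  = read;               /* run-time assignments like these need the opt-out */
            hooks_sketch.write = write;
    }
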
fe2de317 42662diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
5e856224 42663index 014c8dd..6f3dfe6 100644
fe2de317
MT
42664--- a/fs/9p/vfs_inode.c
42665+++ b/fs/9p/vfs_inode.c
5e856224 42666@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
16454cff 42667 void
58c5fc13
MT
42668 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42669 {
42670- char *s = nd_get_link(nd);
42671+ const char *s = nd_get_link(nd);
42672
5e856224
MT
42673 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42674 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
fe2de317 42675diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
5e856224 42676index e95d1b6..3454244 100644
fe2de317
MT
42677--- a/fs/Kconfig.binfmt
42678+++ b/fs/Kconfig.binfmt
5e856224 42679@@ -89,7 +89,7 @@ config HAVE_AOUT
fe2de317
MT
42680
42681 config BINFMT_AOUT
42682 tristate "Kernel support for a.out and ECOFF binaries"
42683- depends on HAVE_AOUT
42684+ depends on HAVE_AOUT && BROKEN
42685 ---help---
42686 A.out (Assembler.OUTput) is a set of formats for libraries and
42687 executables used in the earliest versions of UNIX. Linux used
42688diff --git a/fs/aio.c b/fs/aio.c
4c928ab7 42689index b9d64d8..86cb1d5 100644
fe2de317
MT
42690--- a/fs/aio.c
42691+++ b/fs/aio.c
42692@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58c5fc13
MT
42693 size += sizeof(struct io_event) * nr_events;
42694 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42695
42696- if (nr_pages < 0)
42697+ if (nr_pages <= 0)
42698 return -EINVAL;
42699
42700 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
4c928ab7 42701@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
15a11c5b
MT
42702 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42703 {
42704 ssize_t ret;
42705+ struct iovec iovstack;
42706
42707 #ifdef CONFIG_COMPAT
42708 if (compat)
42709 ret = compat_rw_copy_check_uvector(type,
42710 (struct compat_iovec __user *)kiocb->ki_buf,
42711- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42712+ kiocb->ki_nbytes, 1, &iovstack,
4c928ab7 42713 &kiocb->ki_iovec, 1);
15a11c5b
MT
42714 else
42715 #endif
42716 ret = rw_copy_check_uvector(type,
42717 (struct iovec __user *)kiocb->ki_buf,
42718- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42719+ kiocb->ki_nbytes, 1, &iovstack,
4c928ab7 42720 &kiocb->ki_iovec, 1);
15a11c5b
MT
42721 if (ret < 0)
42722 goto out;
42723
42724+ if (kiocb->ki_iovec == &iovstack) {
42725+ kiocb->ki_inline_vec = iovstack;
42726+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
42727+ }
42728 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42729 kiocb->ki_cur_seg = 0;
42730 /* ki_nbytes/left now reflect bytes instead of segs */
fe2de317 42731diff --git a/fs/attr.c b/fs/attr.c
5e856224 42732index 95053ad..2cc93ca 100644
fe2de317
MT
42733--- a/fs/attr.c
42734+++ b/fs/attr.c
4c928ab7 42735@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
ae4e228f
MT
42736 unsigned long limit;
42737
df50ba0c 42738 limit = rlimit(RLIMIT_FSIZE);
ae4e228f
MT
42739+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42740 if (limit != RLIM_INFINITY && offset > limit)
42741 goto out_sig;
42742 if (offset > inode->i_sb->s_maxbytes)
fe2de317 42743diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
5e856224 42744index f624cd0..3d9a559 100644
fe2de317
MT
42745--- a/fs/autofs4/waitq.c
42746+++ b/fs/autofs4/waitq.c
5e856224 42747@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
6e9df6a3
MT
42748 {
42749 unsigned long sigpipe, flags;
42750 mm_segment_t fs;
42751- const char *data = (const char *)addr;
42752+ const char __user *data = (const char __force_user *)addr;
42753 ssize_t wr = 0;
42754
5e856224 42755 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
fe2de317 42756diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
5e856224 42757index 6e6d536..457113a 100644
fe2de317
MT
42758--- a/fs/befs/linuxvfs.c
42759+++ b/fs/befs/linuxvfs.c
5e856224 42760@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58c5fc13
MT
42761 {
42762 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42763 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42764- char *link = nd_get_link(nd);
42765+ const char *link = nd_get_link(nd);
42766 if (!IS_ERR(link))
42767 kfree(link);
42768 }
fe2de317 42769diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
5e856224 42770index 1ff9405..f1e376a 100644
fe2de317
MT
42771--- a/fs/binfmt_aout.c
42772+++ b/fs/binfmt_aout.c
58c5fc13
MT
42773@@ -16,6 +16,7 @@
42774 #include <linux/string.h>
42775 #include <linux/fs.h>
42776 #include <linux/file.h>
42777+#include <linux/security.h>
42778 #include <linux/stat.h>
42779 #include <linux/fcntl.h>
42780 #include <linux/ptrace.h>
fe2de317 42781@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
6892158b
MT
42782 #endif
42783 # define START_STACK(u) ((void __user *)u.start_stack)
42784
42785+ memset(&dump, 0, sizeof(dump));
42786+
42787 fs = get_fs();
42788 set_fs(KERNEL_DS);
42789 has_dumped = 1;
fe2de317 42790@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58c5fc13
MT
42791
42792 /* If the size of the dump file exceeds the rlimit, then see what would happen
42793 if we wrote the stack, but not the data area. */
42794+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
ae4e228f 42795 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58c5fc13
MT
42796 dump.u_dsize = 0;
42797
42798 /* Make sure we have enough room to write the stack and data areas. */
42799+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
ae4e228f 42800 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58c5fc13
MT
42801 dump.u_ssize = 0;
42802
fe2de317 42803@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
df50ba0c 42804 rlim = rlimit(RLIMIT_DATA);
58c5fc13
MT
42805 if (rlim >= RLIM_INFINITY)
42806 rlim = ~0;
42807+
42808+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42809 if (ex.a_data + ex.a_bss > rlim)
42810 return -ENOMEM;
42811
5e856224 42812@@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
58c5fc13
MT
42813 install_exec_creds(bprm);
42814 current->flags &= ~PF_FORKNOEXEC;
42815
42816+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42817+ current->mm->pax_flags = 0UL;
42818+#endif
42819+
42820+#ifdef CONFIG_PAX_PAGEEXEC
42821+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42822+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42823+
42824+#ifdef CONFIG_PAX_EMUTRAMP
42825+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42826+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42827+#endif
42828+
42829+#ifdef CONFIG_PAX_MPROTECT
42830+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42831+ current->mm->pax_flags |= MF_PAX_MPROTECT;
42832+#endif
42833+
42834+ }
42835+#endif
42836+
42837 if (N_MAGIC(ex) == OMAGIC) {
42838 unsigned long text_addr, map_size;
42839 loff_t pos;
5e856224 42840@@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
58c5fc13
MT
42841
42842 down_write(&current->mm->mmap_sem);
42843 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42844- PROT_READ | PROT_WRITE | PROT_EXEC,
42845+ PROT_READ | PROT_WRITE,
42846 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42847 fd_offset + ex.a_text);
42848 up_write(&current->mm->mmap_sem);
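
The binfmt_aout hunk above derives per-process PaX flags from the legacy marks in the a.out header: PAGEEXEC and MPROTECT are enabled unless the corresponding F_PAX_* bit switches them off, EMUTRAMP must be requested explicitly, and the data segment is now mapped without PROT_EXEC. The same negative-logic convention, restated as a stand-alone helper purely for illustration (the function name is made up; F_PAX_* and MF_PAX_* come from this patch, and the CONFIG_PAX_* #ifdefs are omitted):

    static unsigned long aout_pax_flags_sketch(unsigned long n_flags)
    {
            unsigned long pax_flags = 0UL;

            if (!(n_flags & F_PAX_PAGEEXEC)) {       /* on unless the mark disables it */
                    pax_flags |= MF_PAX_PAGEEXEC;

                    if (n_flags & F_PAX_EMUTRAMP)    /* only when explicitly requested */
                            pax_flags |= MF_PAX_EMUTRAMP;

                    if (!(n_flags & F_PAX_MPROTECT)) /* on unless the mark disables it */
                            pax_flags |= MF_PAX_MPROTECT;
            }

            return pax_flags;
    }
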
fe2de317 42849diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
5e856224 42850index 07d096c..25762af 100644
fe2de317
MT
42851--- a/fs/binfmt_elf.c
42852+++ b/fs/binfmt_elf.c
4c928ab7
MT
42853@@ -32,6 +32,7 @@
42854 #include <linux/elf.h>
42855 #include <linux/utsname.h>
42856 #include <linux/coredump.h>
42857+#include <linux/xattr.h>
42858 #include <asm/uaccess.h>
42859 #include <asm/param.h>
42860 #include <asm/page.h>
42861@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
58c5fc13
MT
42862 #define elf_core_dump NULL
42863 #endif
42864
42865+#ifdef CONFIG_PAX_MPROTECT
42866+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42867+#endif
42868+
42869 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42870 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42871 #else
4c928ab7 42872@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
16454cff
MT
42873 .load_binary = load_elf_binary,
42874 .load_shlib = load_elf_library,
42875 .core_dump = elf_core_dump,
58c5fc13
MT
42876+
42877+#ifdef CONFIG_PAX_MPROTECT
4c928ab7 42878+ .handle_mprotect= elf_handle_mprotect,
58c5fc13
MT
42879+#endif
42880+
16454cff 42881 .min_coredump = ELF_EXEC_PAGESIZE,
58c5fc13 42882 };
16454cff 42883
4c928ab7 42884@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
58c5fc13
MT
42885
42886 static int set_brk(unsigned long start, unsigned long end)
42887 {
42888+ unsigned long e = end;
42889+
42890 start = ELF_PAGEALIGN(start);
42891 end = ELF_PAGEALIGN(end);
42892 if (end > start) {
4c928ab7 42893@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
58c5fc13
MT
42894 if (BAD_ADDR(addr))
42895 return addr;
42896 }
42897- current->mm->start_brk = current->mm->brk = end;
42898+ current->mm->start_brk = current->mm->brk = e;
42899 return 0;
42900 }
42901
4c928ab7 42902@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58c5fc13
MT
42903 elf_addr_t __user *u_rand_bytes;
42904 const char *k_platform = ELF_PLATFORM;
42905 const char *k_base_platform = ELF_BASE_PLATFORM;
42906- unsigned char k_rand_bytes[16];
42907+ u32 k_rand_bytes[4];
42908 int items;
42909 elf_addr_t *elf_info;
42910 int ei_index = 0;
71d190be
MT
42911 const struct cred *cred = current_cred();
42912 struct vm_area_struct *vma;
42913+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42914
42915 /*
42916 * In some cases (e.g. Hyper-Threading), we want to avoid L1
4c928ab7 42917@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58c5fc13
MT
42918 * Generate 16 random bytes for userspace PRNG seeding.
42919 */
42920 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
df50ba0c
MT
42921- u_rand_bytes = (elf_addr_t __user *)
42922- STACK_ALLOC(p, sizeof(k_rand_bytes));
58c5fc13
MT
42923+ srandom32(k_rand_bytes[0] ^ random32());
42924+ srandom32(k_rand_bytes[1] ^ random32());
42925+ srandom32(k_rand_bytes[2] ^ random32());
42926+ srandom32(k_rand_bytes[3] ^ random32());
df50ba0c
MT
42927+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42928+ u_rand_bytes = (elf_addr_t __user *) p;
58c5fc13 42929 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
df50ba0c
MT
42930 return -EFAULT;
42931
4c928ab7 42932@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
71d190be
MT
42933 return -EFAULT;
42934 current->mm->env_end = p;
42935
42936+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42937+
42938 /* Put the elf_info on the stack in the right place. */
42939 sp = (elf_addr_t __user *)envp + 1;
42940- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42941+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42942 return -EFAULT;
42943 return 0;
42944 }
4c928ab7 42945@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58c5fc13
MT
42946 {
42947 struct elf_phdr *elf_phdata;
42948 struct elf_phdr *eppnt;
42949- unsigned long load_addr = 0;
42950+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42951 int load_addr_set = 0;
42952 unsigned long last_bss = 0, elf_bss = 0;
42953- unsigned long error = ~0UL;
42954+ unsigned long error = -EINVAL;
42955 unsigned long total_size;
42956 int retval, i, size;
42957
4c928ab7 42958@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58c5fc13
MT
42959 goto out_close;
42960 }
42961
42962+#ifdef CONFIG_PAX_SEGMEXEC
42963+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42964+ pax_task_size = SEGMEXEC_TASK_SIZE;
42965+#endif
42966+
42967 eppnt = elf_phdata;
42968 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42969 if (eppnt->p_type == PT_LOAD) {
4c928ab7 42970@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58c5fc13
MT
42971 k = load_addr + eppnt->p_vaddr;
42972 if (BAD_ADDR(k) ||
42973 eppnt->p_filesz > eppnt->p_memsz ||
42974- eppnt->p_memsz > TASK_SIZE ||
42975- TASK_SIZE - eppnt->p_memsz < k) {
42976+ eppnt->p_memsz > pax_task_size ||
42977+ pax_task_size - eppnt->p_memsz < k) {
42978 error = -ENOMEM;
42979 goto out_close;
42980 }
4c928ab7 42981@@ -528,6 +552,351 @@ out:
58c5fc13
MT
42982 return error;
42983 }
42984
4c928ab7
MT
42985+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42986+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58c5fc13
MT
42987+{
42988+ unsigned long pax_flags = 0UL;
42989+
4c928ab7
MT
42990+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42991+
58c5fc13
MT
42992+#ifdef CONFIG_PAX_PAGEEXEC
42993+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42994+ pax_flags |= MF_PAX_PAGEEXEC;
42995+#endif
42996+
42997+#ifdef CONFIG_PAX_SEGMEXEC
42998+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42999+ pax_flags |= MF_PAX_SEGMEXEC;
43000+#endif
43001+
43002+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43003+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
ae4e228f 43004+ if ((__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
43005+ pax_flags &= ~MF_PAX_SEGMEXEC;
43006+ else
43007+ pax_flags &= ~MF_PAX_PAGEEXEC;
43008+ }
43009+#endif
43010+
43011+#ifdef CONFIG_PAX_EMUTRAMP
43012+ if (elf_phdata->p_flags & PF_EMUTRAMP)
43013+ pax_flags |= MF_PAX_EMUTRAMP;
43014+#endif
43015+
43016+#ifdef CONFIG_PAX_MPROTECT
43017+ if (elf_phdata->p_flags & PF_MPROTECT)
43018+ pax_flags |= MF_PAX_MPROTECT;
43019+#endif
43020+
43021+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43022+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
43023+ pax_flags |= MF_PAX_RANDMMAP;
43024+#endif
43025+
4c928ab7
MT
43026+#endif
43027+
58c5fc13
MT
43028+ return pax_flags;
43029+}
58c5fc13 43030+
4c928ab7 43031+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58c5fc13
MT
43032+{
43033+ unsigned long pax_flags = 0UL;
43034+
4c928ab7
MT
43035+#ifdef CONFIG_PAX_PT_PAX_FLAGS
43036+
58c5fc13
MT
43037+#ifdef CONFIG_PAX_PAGEEXEC
43038+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
43039+ pax_flags |= MF_PAX_PAGEEXEC;
43040+#endif
43041+
43042+#ifdef CONFIG_PAX_SEGMEXEC
43043+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
43044+ pax_flags |= MF_PAX_SEGMEXEC;
43045+#endif
43046+
43047+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43048+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
ae4e228f 43049+ if ((__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
43050+ pax_flags &= ~MF_PAX_SEGMEXEC;
43051+ else
43052+ pax_flags &= ~MF_PAX_PAGEEXEC;
43053+ }
43054+#endif
43055+
43056+#ifdef CONFIG_PAX_EMUTRAMP
43057+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
43058+ pax_flags |= MF_PAX_EMUTRAMP;
43059+#endif
43060+
43061+#ifdef CONFIG_PAX_MPROTECT
43062+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
43063+ pax_flags |= MF_PAX_MPROTECT;
43064+#endif
43065+
43066+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43067+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
43068+ pax_flags |= MF_PAX_RANDMMAP;
43069+#endif
43070+
4c928ab7
MT
43071+#endif
43072+
58c5fc13
MT
43073+ return pax_flags;
43074+}
58c5fc13 43075+
58c5fc13
MT
43076+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
43077+{
43078+ unsigned long pax_flags = 0UL;
43079+
4c928ab7
MT
43080+#ifdef CONFIG_PAX_EI_PAX
43081+
58c5fc13
MT
43082+#ifdef CONFIG_PAX_PAGEEXEC
43083+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
43084+ pax_flags |= MF_PAX_PAGEEXEC;
43085+#endif
43086+
43087+#ifdef CONFIG_PAX_SEGMEXEC
43088+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
43089+ pax_flags |= MF_PAX_SEGMEXEC;
43090+#endif
43091+
43092+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43093+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
ae4e228f 43094+ if ((__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
43095+ pax_flags &= ~MF_PAX_SEGMEXEC;
43096+ else
43097+ pax_flags &= ~MF_PAX_PAGEEXEC;
43098+ }
43099+#endif
43100+
43101+#ifdef CONFIG_PAX_EMUTRAMP
43102+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
43103+ pax_flags |= MF_PAX_EMUTRAMP;
43104+#endif
43105+
43106+#ifdef CONFIG_PAX_MPROTECT
43107+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
43108+ pax_flags |= MF_PAX_MPROTECT;
43109+#endif
43110+
43111+#ifdef CONFIG_PAX_ASLR
43112+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
43113+ pax_flags |= MF_PAX_RANDMMAP;
43114+#endif
43115+
4c928ab7
MT
43116+#else
43117+
43118+#ifdef CONFIG_PAX_PAGEEXEC
43119+ pax_flags |= MF_PAX_PAGEEXEC;
58c5fc13
MT
43120+#endif
43121+
4c928ab7
MT
43122+#ifdef CONFIG_PAX_MPROTECT
43123+ pax_flags |= MF_PAX_MPROTECT;
43124+#endif
58c5fc13 43125+
4c928ab7
MT
43126+#ifdef CONFIG_PAX_RANDMMAP
43127+ pax_flags |= MF_PAX_RANDMMAP;
58c5fc13
MT
43128+#endif
43129+
4c928ab7
MT
43130+#ifdef CONFIG_PAX_SEGMEXEC
43131+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
43132+ pax_flags &= ~MF_PAX_PAGEEXEC;
43133+ pax_flags |= MF_PAX_SEGMEXEC;
43134+ }
58c5fc13
MT
43135+#endif
43136+
4c928ab7
MT
43137+#endif
43138+
43139+ return pax_flags;
43140+}
43141+
43142+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
43143+{
43144+
58c5fc13 43145+#ifdef CONFIG_PAX_PT_PAX_FLAGS
4c928ab7
MT
43146+ unsigned long i;
43147+
58c5fc13
MT
43148+ for (i = 0UL; i < elf_ex->e_phnum; i++)
43149+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
43150+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
43151+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
43152+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
43153+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
43154+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
4c928ab7 43155+ return ~0UL;
58c5fc13
MT
43156+
43157+#ifdef CONFIG_PAX_SOFTMODE
43158+ if (pax_softmode)
4c928ab7 43159+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58c5fc13
MT
43160+ else
43161+#endif
43162+
4c928ab7 43163+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58c5fc13
MT
43164+ break;
43165+ }
43166+#endif
43167+
4c928ab7
MT
43168+ return ~0UL;
43169+}
43170+
43171+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43172+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
43173+{
43174+ unsigned long pax_flags = 0UL;
43175+
43176+#ifdef CONFIG_PAX_PAGEEXEC
43177+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
43178+ pax_flags |= MF_PAX_PAGEEXEC;
43179+#endif
43180+
43181+#ifdef CONFIG_PAX_SEGMEXEC
43182+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
43183+ pax_flags |= MF_PAX_SEGMEXEC;
43184+#endif
43185+
43186+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43187+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43188+ if ((__supported_pte_mask & _PAGE_NX))
43189+ pax_flags &= ~MF_PAX_SEGMEXEC;
71d190be 43190+ else
4c928ab7
MT
43191+ pax_flags &= ~MF_PAX_PAGEEXEC;
43192+ }
71d190be 43193+#endif
4c928ab7
MT
43194+
43195+#ifdef CONFIG_PAX_EMUTRAMP
43196+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
43197+ pax_flags |= MF_PAX_EMUTRAMP;
43198+#endif
43199+
43200+#ifdef CONFIG_PAX_MPROTECT
43201+ if (pax_flags_softmode & MF_PAX_MPROTECT)
43202+ pax_flags |= MF_PAX_MPROTECT;
43203+#endif
43204+
43205+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43206+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
43207+ pax_flags |= MF_PAX_RANDMMAP;
43208+#endif
43209+
43210+ return pax_flags;
43211+}
43212+
43213+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
43214+{
43215+ unsigned long pax_flags = 0UL;
43216+
43217+#ifdef CONFIG_PAX_PAGEEXEC
43218+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
43219+ pax_flags |= MF_PAX_PAGEEXEC;
43220+#endif
43221+
43222+#ifdef CONFIG_PAX_SEGMEXEC
43223+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
43224+ pax_flags |= MF_PAX_SEGMEXEC;
43225+#endif
43226+
43227+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43228+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43229+ if ((__supported_pte_mask & _PAGE_NX))
43230+ pax_flags &= ~MF_PAX_SEGMEXEC;
43231+ else
43232+ pax_flags &= ~MF_PAX_PAGEEXEC;
71d190be
MT
43233+ }
43234+#endif
43235+
4c928ab7
MT
43236+#ifdef CONFIG_PAX_EMUTRAMP
43237+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
43238+ pax_flags |= MF_PAX_EMUTRAMP;
43239+#endif
43240+
43241+#ifdef CONFIG_PAX_MPROTECT
43242+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
43243+ pax_flags |= MF_PAX_MPROTECT;
43244+#endif
43245+
43246+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43247+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
43248+ pax_flags |= MF_PAX_RANDMMAP;
43249+#endif
43250+
43251+ return pax_flags;
43252+}
43253+#endif
43254+
43255+static unsigned long pax_parse_xattr_pax(struct file * const file)
43256+{
43257+
43258+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43259+ ssize_t xattr_size, i;
43260+ unsigned char xattr_value[5];
43261+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
43262+
43263+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
43264+ if (xattr_size <= 0)
43265+ return ~0UL;
43266+
43267+ for (i = 0; i < xattr_size; i++)
43268+ switch (xattr_value[i]) {
43269+ default:
43270+ return ~0UL;
43271+
43272+#define parse_flag(option1, option2, flag) \
43273+ case option1: \
43274+ pax_flags_hardmode |= MF_PAX_##flag; \
43275+ break; \
43276+ case option2: \
43277+ pax_flags_softmode |= MF_PAX_##flag; \
43278+ break;
43279+
43280+ parse_flag('p', 'P', PAGEEXEC);
43281+ parse_flag('e', 'E', EMUTRAMP);
43282+ parse_flag('m', 'M', MPROTECT);
43283+ parse_flag('r', 'R', RANDMMAP);
43284+ parse_flag('s', 'S', SEGMEXEC);
43285+
43286+#undef parse_flag
43287+ }
43288+
43289+ if (pax_flags_hardmode & pax_flags_softmode)
43290+ return ~0UL;
43291+
43292+#ifdef CONFIG_PAX_SOFTMODE
43293+ if (pax_softmode)
43294+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
43295+ else
43296+#endif
43297+
43298+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
43299+#else
43300+ return ~0UL;
43301+#endif
43302+
43303+}
43304+
43305+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
43306+{
43307+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
43308+
43309+ pax_flags = pax_parse_ei_pax(elf_ex);
43310+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
43311+ xattr_pax_flags = pax_parse_xattr_pax(file);
43312+
43313+ if (pt_pax_flags == ~0UL)
43314+ pt_pax_flags = xattr_pax_flags;
43315+ else if (xattr_pax_flags == ~0UL)
43316+ xattr_pax_flags = pt_pax_flags;
43317+ if (pt_pax_flags != xattr_pax_flags)
43318+ return -EINVAL;
43319+ if (pt_pax_flags != ~0UL)
43320+ pax_flags = pt_pax_flags;
43321+
58c5fc13
MT
43322+ if (0 > pax_check_flags(&pax_flags))
43323+ return -EINVAL;
43324+
43325+ current->mm->pax_flags = pax_flags;
43326+ return 0;
43327+}
43328+#endif
43329+
43330 /*
43331 * These are the functions used to load ELF style executables and shared
43332 * libraries. There is no binary dependent code anywhere else.
4c928ab7 43333@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58c5fc13
MT
43334 {
43335 unsigned int random_variable = 0;
43336
43337+#ifdef CONFIG_PAX_RANDUSTACK
43338+ if (randomize_va_space)
43339+ return stack_top - current->mm->delta_stack;
43340+#endif
43341+
43342 if ((current->flags & PF_RANDOMIZE) &&
43343 !(current->personality & ADDR_NO_RANDOMIZE)) {
43344 random_variable = get_random_int() & STACK_RND_MASK;
4c928ab7 43345@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13
MT
43346 unsigned long load_addr = 0, load_bias = 0;
43347 int load_addr_set = 0;
43348 char * elf_interpreter = NULL;
43349- unsigned long error;
43350+ unsigned long error = 0;
43351 struct elf_phdr *elf_ppnt, *elf_phdata;
43352 unsigned long elf_bss, elf_brk;
43353 int retval, i;
4c928ab7 43354@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13 43355 unsigned long start_code, end_code, start_data, end_data;
66a7e928 43356 unsigned long reloc_func_desc __maybe_unused = 0;
58c5fc13
MT
43357 int executable_stack = EXSTACK_DEFAULT;
43358- unsigned long def_flags = 0;
43359 struct {
43360 struct elfhdr elf_ex;
43361 struct elfhdr interp_elf_ex;
43362 } *loc;
43363+ unsigned long pax_task_size = TASK_SIZE;
43364
43365 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
43366 if (!loc) {
4c928ab7 43367@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13
MT
43368
43369 /* OK, This is the point of no return */
43370 current->flags &= ~PF_FORKNOEXEC;
43371- current->mm->def_flags = def_flags;
43372+
43373+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43374+ current->mm->pax_flags = 0UL;
43375+#endif
43376+
43377+#ifdef CONFIG_PAX_DLRESOLVE
43378+ current->mm->call_dl_resolve = 0UL;
43379+#endif
43380+
43381+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
43382+ current->mm->call_syscall = 0UL;
43383+#endif
43384+
43385+#ifdef CONFIG_PAX_ASLR
43386+ current->mm->delta_mmap = 0UL;
43387+ current->mm->delta_stack = 0UL;
43388+#endif
43389+
43390+ current->mm->def_flags = 0;
43391+
4c928ab7
MT
43392+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
43393+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58c5fc13
MT
43394+ send_sig(SIGKILL, current, 0);
43395+ goto out_free_dentry;
43396+ }
43397+#endif
43398+
43399+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43400+ pax_set_initial_flags(bprm);
43401+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43402+ if (pax_set_initial_flags_func)
43403+ (pax_set_initial_flags_func)(bprm);
43404+#endif
43405+
43406+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 43407+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58c5fc13
MT
43408+ current->mm->context.user_cs_limit = PAGE_SIZE;
43409+ current->mm->def_flags |= VM_PAGEEXEC;
43410+ }
43411+#endif
43412+
43413+#ifdef CONFIG_PAX_SEGMEXEC
43414+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43415+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43416+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43417+ pax_task_size = SEGMEXEC_TASK_SIZE;
66a7e928 43418+ current->mm->def_flags |= VM_NOHUGEPAGE;
58c5fc13
MT
43419+ }
43420+#endif
43421+
43422+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43423+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43424+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43425+ put_cpu();
43426+ }
43427+#endif
ae4e228f
MT
43428
43429 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43430 may depend on the personality. */
43431 SET_PERSONALITY(loc->elf_ex);
58c5fc13
MT
43432+
43433+#ifdef CONFIG_PAX_ASLR
43434+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43435+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43436+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43437+ }
43438+#endif
58c5fc13
MT
43439+
43440+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43441+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43442+ executable_stack = EXSTACK_DISABLE_X;
43443+ current->personality &= ~READ_IMPLIES_EXEC;
43444+ } else
43445+#endif
43446+
43447 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43448 current->personality |= READ_IMPLIES_EXEC;
43449
4c928ab7 43450@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13
MT
43451 #else
43452 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43453 #endif
43454+
43455+#ifdef CONFIG_PAX_RANDMMAP
43456+ /* PaX: randomize base address at the default exe base if requested */
43457+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43458+#ifdef CONFIG_SPARC64
43459+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43460+#else
43461+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43462+#endif
43463+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43464+ elf_flags |= MAP_FIXED;
43465+ }
43466+#endif
43467+
43468 }
43469
43470 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
4c928ab7 43471@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13
MT
43472 * allowed task size. Note that p_filesz must always be
43473 * <= p_memsz so it is only necessary to check p_memsz.
43474 */
43475- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43476- elf_ppnt->p_memsz > TASK_SIZE ||
43477- TASK_SIZE - elf_ppnt->p_memsz < k) {
43478+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43479+ elf_ppnt->p_memsz > pax_task_size ||
43480+ pax_task_size - elf_ppnt->p_memsz < k) {
43481 /* set_brk can never work. Avoid overflows. */
43482 send_sig(SIGKILL, current, 0);
43483 retval = -EINVAL;
5e856224 43484@@ -881,11 +1339,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13
MT
43485 goto out_free_dentry;
43486 }
43487 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43488- send_sig(SIGSEGV, current, 0);
43489- retval = -EFAULT; /* Nobody gets to see this, but.. */
43490- goto out_free_dentry;
43491+ /*
43492+ * This bss-zeroing can fail if the ELF
43493+ * file specifies odd protections. So
43494+ * we don't check the return value
43495+ */
43496 }
43497
5e856224
MT
43498+#ifdef CONFIG_PAX_RANDMMAP
43499+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43500+ unsigned long start, size;
43501+
43502+ start = ELF_PAGEALIGN(elf_brk);
43503+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43504+ down_write(&current->mm->mmap_sem);
43505+ retval = -ENOMEM;
43506+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43507+ unsigned long prot = PROT_NONE;
43508+
43509+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43510+// if (current->personality & ADDR_NO_RANDOMIZE)
43511+// prot = PROT_READ;
43512+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43513+ retval = IS_ERR_VALUE(start) ? start : 0;
43514+ }
43515+ up_write(&current->mm->mmap_sem);
43516+ if (retval == 0)
43517+ retval = set_brk(start + size, start + size + PAGE_SIZE);
43518+ if (retval < 0) {
43519+ send_sig(SIGKILL, current, 0);
43520+ goto out_free_dentry;
43521+ }
43522+ }
43523+#endif
43524+
58c5fc13 43525 if (elf_interpreter) {
5e856224
MT
43526 unsigned long uninitialized_var(interp_map_addr);
43527
43528@@ -1098,7 +1585,7 @@ out:
58c5fc13
MT
43529 * Decide what to dump of a segment, part, all or none.
43530 */
43531 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43532- unsigned long mm_flags)
43533+ unsigned long mm_flags, long signr)
43534 {
43535 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43536
5e856224 43537@@ -1132,7 +1619,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58c5fc13
MT
43538 if (vma->vm_file == NULL)
43539 return 0;
43540
43541- if (FILTER(MAPPED_PRIVATE))
43542+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43543 goto whole;
43544
43545 /*
5e856224 43546@@ -1354,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
ae4e228f
MT
43547 {
43548 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43549 int i = 0;
43550- do
43551+ do {
43552 i += 2;
43553- while (auxv[i - 2] != AT_NULL);
43554+ } while (auxv[i - 2] != AT_NULL);
43555 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43556 }
43557
5e856224 43558@@ -1862,14 +2349,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
df50ba0c
MT
43559 }
43560
43561 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43562- unsigned long mm_flags)
43563+ struct coredump_params *cprm)
43564 {
43565 struct vm_area_struct *vma;
43566 size_t size = 0;
43567
43568 for (vma = first_vma(current, gate_vma); vma != NULL;
43569 vma = next_vma(vma, gate_vma))
43570- size += vma_dump_size(vma, mm_flags);
43571+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43572 return size;
43573 }
43574
5e856224 43575@@ -1963,7 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
df50ba0c
MT
43576
43577 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43578
43579- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43580+ offset += elf_core_vma_data_size(gate_vma, cprm);
43581 offset += elf_core_extra_data_size();
43582 e_shoff = offset;
43583
5e856224 43584@@ -1977,10 +2464,12 @@ static int elf_core_dump(struct coredump_params *cprm)
df50ba0c
MT
43585 offset = dataoff;
43586
43587 size += sizeof(*elf);
43588+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43589 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43590 goto end_coredump;
43591
43592 size += sizeof(*phdr4note);
43593+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43594 if (size > cprm->limit
43595 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43596 goto end_coredump;
5e856224 43597@@ -1994,7 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58c5fc13
MT
43598 phdr.p_offset = offset;
43599 phdr.p_vaddr = vma->vm_start;
43600 phdr.p_paddr = 0;
df50ba0c
MT
43601- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43602+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
58c5fc13
MT
43603 phdr.p_memsz = vma->vm_end - vma->vm_start;
43604 offset += phdr.p_filesz;
43605 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
5e856224 43606@@ -2005,6 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
df50ba0c
MT
43607 phdr.p_align = ELF_EXEC_PAGESIZE;
43608
43609 size += sizeof(phdr);
43610+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43611 if (size > cprm->limit
43612 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43613 goto end_coredump;
5e856224 43614@@ -2029,7 +2519,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58c5fc13
MT
43615 unsigned long addr;
43616 unsigned long end;
43617
df50ba0c
MT
43618- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43619+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
58c5fc13
MT
43620
43621 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43622 struct page *page;
5e856224 43623@@ -2038,6 +2528,7 @@ static int elf_core_dump(struct coredump_params *cprm)
ae4e228f
MT
43624 page = get_dump_page(addr);
43625 if (page) {
43626 void *kaddr = kmap(page);
43627+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43628 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43629 !dump_write(cprm->file, kaddr,
43630 PAGE_SIZE);
5e856224 43631@@ -2055,6 +2546,7 @@ static int elf_core_dump(struct coredump_params *cprm)
df50ba0c
MT
43632
43633 if (e_phnum == PN_XNUM) {
43634 size += sizeof(*shdr4extnum);
43635+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43636 if (size > cprm->limit
43637 || !dump_write(cprm->file, shdr4extnum,
43638 sizeof(*shdr4extnum)))
5e856224 43639@@ -2075,6 +2567,97 @@ out:
ae4e228f
MT
43640
43641 #endif /* CONFIG_ELF_CORE */
58c5fc13
MT
43642
43643+#ifdef CONFIG_PAX_MPROTECT
43644+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
43645+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43646+ * we'll remove VM_MAYWRITE for good on RELRO segments.
43647+ *
43648+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43649+ * basis because we want to allow the common case and not the special ones.
43650+ */
43651+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43652+{
43653+ struct elfhdr elf_h;
43654+ struct elf_phdr elf_p;
43655+ unsigned long i;
43656+ unsigned long oldflags;
43657+ bool is_textrel_rw, is_textrel_rx, is_relro;
43658+
43659+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43660+ return;
43661+
43662+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43663+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43664+
57199397 43665+#ifdef CONFIG_PAX_ELFRELOCS
58c5fc13
MT
43666+ /* possible TEXTREL */
43667+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43668+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
57199397
MT
43669+#else
43670+ is_textrel_rw = false;
43671+ is_textrel_rx = false;
58c5fc13
MT
43672+#endif
43673+
43674+ /* possible RELRO */
43675+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43676+
43677+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43678+ return;
43679+
43680+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43681+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43682+
43683+#ifdef CONFIG_PAX_ETEXECRELOCS
43684+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43685+#else
43686+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43687+#endif
43688+
43689+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43690+ !elf_check_arch(&elf_h) ||
43691+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43692+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43693+ return;
43694+
43695+ for (i = 0UL; i < elf_h.e_phnum; i++) {
43696+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43697+ return;
43698+ switch (elf_p.p_type) {
43699+ case PT_DYNAMIC:
43700+ if (!is_textrel_rw && !is_textrel_rx)
43701+ continue;
43702+ i = 0UL;
43703+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43704+ elf_dyn dyn;
43705+
43706+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43707+ return;
43708+ if (dyn.d_tag == DT_NULL)
43709+ return;
43710+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43711+ gr_log_textrel(vma);
43712+ if (is_textrel_rw)
43713+ vma->vm_flags |= VM_MAYWRITE;
43714+ else
43715+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
43716+ vma->vm_flags &= ~VM_MAYWRITE;
43717+ return;
43718+ }
43719+ i++;
43720+ }
43721+ return;
43722+
43723+ case PT_GNU_RELRO:
43724+ if (!is_relro)
43725+ continue;
43726+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43727+ vma->vm_flags &= ~VM_MAYWRITE;
43728+ return;
43729+ }
43730+ }
43731+}
43732+#endif
43733+
43734 static int __init init_elf_binfmt(void)
43735 {
43736 return register_binfmt(&elf_format);
fe2de317
MT
43737diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43738index 1bffbe0..c8c283e 100644
43739--- a/fs/binfmt_flat.c
43740+++ b/fs/binfmt_flat.c
43741@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13
MT
43742 realdatastart = (unsigned long) -ENOMEM;
43743 printk("Unable to allocate RAM for process data, errno %d\n",
43744 (int)-realdatastart);
43745+ down_write(&current->mm->mmap_sem);
43746 do_munmap(current->mm, textpos, text_len);
43747+ up_write(&current->mm->mmap_sem);
43748 ret = realdatastart;
43749 goto err;
43750 }
fe2de317 43751@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 43752 }
ae4e228f 43753 if (IS_ERR_VALUE(result)) {
58c5fc13
MT
43754 printk("Unable to read data+bss, errno %d\n", (int)-result);
43755+ down_write(&current->mm->mmap_sem);
43756 do_munmap(current->mm, textpos, text_len);
57199397 43757 do_munmap(current->mm, realdatastart, len);
58c5fc13
MT
43758+ up_write(&current->mm->mmap_sem);
43759 ret = result;
43760 goto err;
43761 }
fe2de317 43762@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 43763 }
ae4e228f 43764 if (IS_ERR_VALUE(result)) {
58c5fc13
MT
43765 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43766+ down_write(&current->mm->mmap_sem);
43767 do_munmap(current->mm, textpos, text_len + data_len + extra +
43768 MAX_SHARED_LIBS * sizeof(unsigned long));
43769+ up_write(&current->mm->mmap_sem);
43770 ret = result;
43771 goto err;
43772 }
fe2de317 43773diff --git a/fs/bio.c b/fs/bio.c
5e856224 43774index b980ecd..74800bf 100644
fe2de317
MT
43775--- a/fs/bio.c
43776+++ b/fs/bio.c
5e856224
MT
43777@@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43778 /*
43779 * Overflow, abort
43780 */
43781- if (end < start)
43782+ if (end < start || end - start > INT_MAX - nr_pages)
43783 return ERR_PTR(-EINVAL);
43784
43785 nr_pages += end - start;
43786@@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
ae4e228f
MT
43787 const int read = bio_data_dir(bio) == READ;
43788 struct bio_map_data *bmd = bio->bi_private;
43789 int i;
43790- char *p = bmd->sgvecs[0].iov_base;
6e9df6a3 43791+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
ae4e228f
MT
43792
43793 __bio_for_each_segment(bvec, bio, i, 0) {
43794 char *addr = page_address(bvec->bv_page);
fe2de317 43795diff --git a/fs/block_dev.c b/fs/block_dev.c
5e856224 43796index 5e9f198..6bf9b1c 100644
fe2de317
MT
43797--- a/fs/block_dev.c
43798+++ b/fs/block_dev.c
5e856224 43799@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
df50ba0c 43800 else if (bdev->bd_contains == bdev)
57199397
MT
43801 return true; /* is a whole device which isn't held */
43802
16454cff
MT
43803- else if (whole->bd_holder == bd_may_claim)
43804+ else if (whole->bd_holder == (void *)bd_may_claim)
57199397
MT
43805 return true; /* is a partition of a device that is being partitioned */
43806 else if (whole->bd_holder != NULL)
43807 return false; /* is a partition of a held device */
5e856224
MT
43808diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43809index d986824..af1befd 100644
43810--- a/fs/btrfs/check-integrity.c
43811+++ b/fs/btrfs/check-integrity.c
43812@@ -157,7 +157,7 @@ struct btrfsic_block {
43813 union {
43814 bio_end_io_t *bio;
43815 bh_end_io_t *bh;
43816- } orig_bio_bh_end_io;
43817+ } __no_const orig_bio_bh_end_io;
43818 int submit_bio_bh_rw;
43819 u64 flush_gen; /* only valid if !never_written */
43820 };
fe2de317 43821diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
5e856224 43822index 0639a55..7d9e07f 100644
fe2de317
MT
43823--- a/fs/btrfs/ctree.c
43824+++ b/fs/btrfs/ctree.c
43825@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
6892158b
MT
43826 free_extent_buffer(buf);
43827 add_root_to_dirty_list(root);
43828 } else {
43829- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43830- parent_start = parent->start;
43831- else
43832+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43833+ if (parent)
43834+ parent_start = parent->start;
43835+ else
43836+ parent_start = 0;
43837+ } else
43838 parent_start = 0;
43839
43840 WARN_ON(trans->transid != btrfs_header_generation(parent));
fe2de317 43841diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5e856224 43842index 892b347..b3db246 100644
fe2de317
MT
43843--- a/fs/btrfs/inode.c
43844+++ b/fs/btrfs/inode.c
5e856224 43845@@ -6930,7 +6930,7 @@ fail:
16454cff
MT
43846 return -ENOMEM;
43847 }
43848
43849-static int btrfs_getattr(struct vfsmount *mnt,
43850+int btrfs_getattr(struct vfsmount *mnt,
43851 struct dentry *dentry, struct kstat *stat)
43852 {
43853 struct inode *inode = dentry->d_inode;
5e856224 43854@@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
16454cff
MT
43855 return 0;
43856 }
43857
43858+EXPORT_SYMBOL(btrfs_getattr);
43859+
43860+dev_t get_btrfs_dev_from_inode(struct inode *inode)
43861+{
6e9df6a3 43862+ return BTRFS_I(inode)->root->anon_dev;
16454cff
MT
43863+}
43864+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43865+
66a7e928
MT
43866 /*
43867 * If a file is moved, it will inherit the cow and compression flags of the new
43868 * directory.
fe2de317 43869diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5e856224 43870index 1b36f19..5ac7360 100644
fe2de317
MT
43871--- a/fs/btrfs/ioctl.c
43872+++ b/fs/btrfs/ioctl.c
5e856224 43873@@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
c52201e0
MT
43874 for (i = 0; i < num_types; i++) {
43875 struct btrfs_space_info *tmp;
43876
43877+ /* Don't copy in more than we allocated */
317566c1
MT
43878 if (!slot_count)
43879 break;
43880
c52201e0
MT
43881+ slot_count--;
43882+
43883 info = NULL;
43884 rcu_read_lock();
43885 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
5e856224 43886@@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
317566c1
MT
43887 memcpy(dest, &space, sizeof(space));
43888 dest++;
43889 space_args.total_spaces++;
43890- slot_count--;
43891 }
43892- if (!slot_count)
43893- break;
43894 }
43895 up_read(&info->groups_sem);
43896 }
6e9df6a3
MT
43897
43898- user_dest = (struct btrfs_ioctl_space_info *)
43899+ user_dest = (struct btrfs_ioctl_space_info __user *)
43900 (arg + sizeof(struct btrfs_ioctl_space_args));
43901
43902 if (copy_to_user(user_dest, dest_orig, alloc_size))
fe2de317 43903diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
5e856224 43904index 8c1aae2..1e46446 100644
fe2de317
MT
43905--- a/fs/btrfs/relocation.c
43906+++ b/fs/btrfs/relocation.c
4c928ab7 43907@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
6892158b
MT
43908 }
43909 spin_unlock(&rc->reloc_root_tree.lock);
43910
43911- BUG_ON((struct btrfs_root *)node->data != root);
43912+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43913
43914 if (!del) {
43915 spin_lock(&rc->reloc_root_tree.lock);
fe2de317
MT
43916diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43917index 622f469..e8d2d55 100644
43918--- a/fs/cachefiles/bind.c
43919+++ b/fs/cachefiles/bind.c
43920@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
df50ba0c
MT
43921 args);
43922
43923 /* start by checking things over */
43924- ASSERT(cache->fstop_percent >= 0 &&
43925- cache->fstop_percent < cache->fcull_percent &&
43926+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43927 cache->fcull_percent < cache->frun_percent &&
43928 cache->frun_percent < 100);
43929
43930- ASSERT(cache->bstop_percent >= 0 &&
43931- cache->bstop_percent < cache->bcull_percent &&
43932+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43933 cache->bcull_percent < cache->brun_percent &&
43934 cache->brun_percent < 100);
43935
fe2de317
MT
43936diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43937index 0a1467b..6a53245 100644
43938--- a/fs/cachefiles/daemon.c
43939+++ b/fs/cachefiles/daemon.c
43940@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
ae4e228f
MT
43941 if (n > buflen)
43942 return -EMSGSIZE;
43943
43944- if (copy_to_user(_buffer, buffer, n) != 0)
43945+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43946 return -EFAULT;
58c5fc13 43947
ae4e228f 43948 return n;
fe2de317 43949@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
df50ba0c
MT
43950 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43951 return -EIO;
43952
43953- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43954+ if (datalen > PAGE_SIZE - 1)
43955 return -EOPNOTSUPP;
43956
43957 /* drag the command string into the kernel so we can parse it */
fe2de317 43958@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
df50ba0c
MT
43959 if (args[0] != '%' || args[1] != '\0')
43960 return -EINVAL;
43961
43962- if (fstop < 0 || fstop >= cache->fcull_percent)
43963+ if (fstop >= cache->fcull_percent)
43964 return cachefiles_daemon_range_error(cache, args);
43965
43966 cache->fstop_percent = fstop;
fe2de317 43967@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
df50ba0c
MT
43968 if (args[0] != '%' || args[1] != '\0')
43969 return -EINVAL;
43970
43971- if (bstop < 0 || bstop >= cache->bcull_percent)
43972+ if (bstop >= cache->bcull_percent)
43973 return cachefiles_daemon_range_error(cache, args);
43974
43975 cache->bstop_percent = bstop;
fe2de317
MT
43976diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43977index bd6bc1b..b627b53 100644
43978--- a/fs/cachefiles/internal.h
43979+++ b/fs/cachefiles/internal.h
8308f9c9
MT
43980@@ -57,7 +57,7 @@ struct cachefiles_cache {
43981 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43982 struct rb_root active_nodes; /* active nodes (can't be culled) */
43983 rwlock_t active_lock; /* lock for active_nodes */
43984- atomic_t gravecounter; /* graveyard uniquifier */
43985+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43986 unsigned frun_percent; /* when to stop culling (% files) */
43987 unsigned fcull_percent; /* when to start culling (% files) */
43988 unsigned fstop_percent; /* when to stop allocating (% files) */
fe2de317 43989@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
8308f9c9
MT
43990 * proc.c
43991 */
43992 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43993-extern atomic_t cachefiles_lookup_histogram[HZ];
43994-extern atomic_t cachefiles_mkdir_histogram[HZ];
43995-extern atomic_t cachefiles_create_histogram[HZ];
43996+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43997+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43998+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43999
44000 extern int __init cachefiles_proc_init(void);
44001 extern void cachefiles_proc_cleanup(void);
44002 static inline
44003-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
44004+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
44005 {
44006 unsigned long jif = jiffies - start_jif;
44007 if (jif >= HZ)
44008 jif = HZ - 1;
44009- atomic_inc(&histogram[jif]);
44010+ atomic_inc_unchecked(&histogram[jif]);
44011 }
44012
44013 #else
fe2de317
MT
44014diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
44015index a0358c2..d6137f2 100644
44016--- a/fs/cachefiles/namei.c
44017+++ b/fs/cachefiles/namei.c
66a7e928 44018@@ -318,7 +318,7 @@ try_again:
8308f9c9
MT
44019 /* first step is to make up a grave dentry in the graveyard */
44020 sprintf(nbuffer, "%08x%08x",
44021 (uint32_t) get_seconds(),
44022- (uint32_t) atomic_inc_return(&cache->gravecounter));
44023+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
44024
44025 /* do the multiway lock magic */
44026 trap = lock_rename(cache->graveyard, dir);
fe2de317
MT
44027diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
44028index eccd339..4c1d995 100644
44029--- a/fs/cachefiles/proc.c
44030+++ b/fs/cachefiles/proc.c
8308f9c9
MT
44031@@ -14,9 +14,9 @@
44032 #include <linux/seq_file.h>
44033 #include "internal.h"
44034
44035-atomic_t cachefiles_lookup_histogram[HZ];
44036-atomic_t cachefiles_mkdir_histogram[HZ];
44037-atomic_t cachefiles_create_histogram[HZ];
44038+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
44039+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
44040+atomic_unchecked_t cachefiles_create_histogram[HZ];
44041
44042 /*
44043 * display the latency histogram
fe2de317 44044@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
8308f9c9
MT
44045 return 0;
44046 default:
44047 index = (unsigned long) v - 3;
44048- x = atomic_read(&cachefiles_lookup_histogram[index]);
44049- y = atomic_read(&cachefiles_mkdir_histogram[index]);
44050- z = atomic_read(&cachefiles_create_histogram[index]);
44051+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
44052+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
44053+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
44054 if (x == 0 && y == 0 && z == 0)
44055 return 0;
44056
fe2de317
MT
44057diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
44058index 0e3c092..818480e 100644
44059--- a/fs/cachefiles/rdwr.c
44060+++ b/fs/cachefiles/rdwr.c
44061@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
ae4e228f
MT
44062 old_fs = get_fs();
44063 set_fs(KERNEL_DS);
44064 ret = file->f_op->write(
44065- file, (const void __user *) data, len, &pos);
6e9df6a3 44066+ file, (const void __force_user *) data, len, &pos);
ae4e228f
MT
44067 set_fs(old_fs);
44068 kunmap(page);
44069 if (ret != len)
fe2de317 44070diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
5e856224 44071index 3e8094b..cb3ff3d 100644
fe2de317
MT
44072--- a/fs/ceph/dir.c
44073+++ b/fs/ceph/dir.c
44074@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
bc901d79
MT
44075 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
44076 struct ceph_mds_client *mdsc = fsc->mdsc;
6892158b
MT
44077 unsigned frag = fpos_frag(filp->f_pos);
44078- int off = fpos_off(filp->f_pos);
44079+ unsigned int off = fpos_off(filp->f_pos);
44080 int err;
44081 u32 ftype;
44082 struct ceph_mds_reply_info_parsed *rinfo;
4c928ab7
MT
44083@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
44084 if (nd &&
44085 (nd->flags & LOOKUP_OPEN) &&
44086 !(nd->intent.open.flags & O_CREAT)) {
44087- int mode = nd->intent.open.create_mode & ~current->fs->umask;
44088+ int mode = nd->intent.open.create_mode & ~current_umask();
44089 return ceph_lookup_open(dir, dentry, nd, mode, 1);
44090 }
44091
44092diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
44093index cfd1ce3..6b13a74 100644
44094--- a/fs/cifs/asn1.c
44095+++ b/fs/cifs/asn1.c
44096@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
44097
44098 static int
44099 asn1_oid_decode(struct asn1_ctx *ctx,
44100+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
44101+static int
44102+asn1_oid_decode(struct asn1_ctx *ctx,
44103 unsigned char *eoc, unsigned long **oid, unsigned int *len)
44104 {
44105 unsigned long subid;
fe2de317 44106diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
5e856224 44107index 24b3dfc..3cd5454 100644
fe2de317
MT
44108--- a/fs/cifs/cifs_debug.c
44109+++ b/fs/cifs/cifs_debug.c
44110@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
15a11c5b
MT
44111
44112 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
44113 #ifdef CONFIG_CIFS_STATS2
44114- atomic_set(&totBufAllocCount, 0);
44115- atomic_set(&totSmBufAllocCount, 0);
44116+ atomic_set_unchecked(&totBufAllocCount, 0);
44117+ atomic_set_unchecked(&totSmBufAllocCount, 0);
44118 #endif /* CONFIG_CIFS_STATS2 */
44119 spin_lock(&cifs_tcp_ses_lock);
44120 list_for_each(tmp1, &cifs_tcp_ses_list) {
fe2de317 44121@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
8308f9c9 44122 tcon = list_entry(tmp3,
15a11c5b 44123 struct cifs_tcon,
8308f9c9
MT
44124 tcon_list);
44125- atomic_set(&tcon->num_smbs_sent, 0);
44126- atomic_set(&tcon->num_writes, 0);
44127- atomic_set(&tcon->num_reads, 0);
44128- atomic_set(&tcon->num_oplock_brks, 0);
44129- atomic_set(&tcon->num_opens, 0);
44130- atomic_set(&tcon->num_posixopens, 0);
44131- atomic_set(&tcon->num_posixmkdirs, 0);
44132- atomic_set(&tcon->num_closes, 0);
44133- atomic_set(&tcon->num_deletes, 0);
44134- atomic_set(&tcon->num_mkdirs, 0);
44135- atomic_set(&tcon->num_rmdirs, 0);
44136- atomic_set(&tcon->num_renames, 0);
44137- atomic_set(&tcon->num_t2renames, 0);
44138- atomic_set(&tcon->num_ffirst, 0);
44139- atomic_set(&tcon->num_fnext, 0);
44140- atomic_set(&tcon->num_fclose, 0);
44141- atomic_set(&tcon->num_hardlinks, 0);
44142- atomic_set(&tcon->num_symlinks, 0);
44143- atomic_set(&tcon->num_locks, 0);
44144+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
44145+ atomic_set_unchecked(&tcon->num_writes, 0);
44146+ atomic_set_unchecked(&tcon->num_reads, 0);
44147+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
44148+ atomic_set_unchecked(&tcon->num_opens, 0);
44149+ atomic_set_unchecked(&tcon->num_posixopens, 0);
44150+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
44151+ atomic_set_unchecked(&tcon->num_closes, 0);
44152+ atomic_set_unchecked(&tcon->num_deletes, 0);
44153+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
44154+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
44155+ atomic_set_unchecked(&tcon->num_renames, 0);
44156+ atomic_set_unchecked(&tcon->num_t2renames, 0);
44157+ atomic_set_unchecked(&tcon->num_ffirst, 0);
44158+ atomic_set_unchecked(&tcon->num_fnext, 0);
44159+ atomic_set_unchecked(&tcon->num_fclose, 0);
44160+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
44161+ atomic_set_unchecked(&tcon->num_symlinks, 0);
44162+ atomic_set_unchecked(&tcon->num_locks, 0);
44163 }
44164 }
44165 }
fe2de317 44166@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
15a11c5b
MT
44167 smBufAllocCount.counter, cifs_min_small);
44168 #ifdef CONFIG_CIFS_STATS2
44169 seq_printf(m, "Total Large %d Small %d Allocations\n",
44170- atomic_read(&totBufAllocCount),
44171- atomic_read(&totSmBufAllocCount));
44172+ atomic_read_unchecked(&totBufAllocCount),
44173+ atomic_read_unchecked(&totSmBufAllocCount));
44174 #endif /* CONFIG_CIFS_STATS2 */
44175
44176 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
fe2de317 44177@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
8308f9c9
MT
44178 if (tcon->need_reconnect)
44179 seq_puts(m, "\tDISCONNECTED ");
44180 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
44181- atomic_read(&tcon->num_smbs_sent),
44182- atomic_read(&tcon->num_oplock_brks));
44183+ atomic_read_unchecked(&tcon->num_smbs_sent),
44184+ atomic_read_unchecked(&tcon->num_oplock_brks));
44185 seq_printf(m, "\nReads: %d Bytes: %lld",
44186- atomic_read(&tcon->num_reads),
44187+ atomic_read_unchecked(&tcon->num_reads),
44188 (long long)(tcon->bytes_read));
44189 seq_printf(m, "\nWrites: %d Bytes: %lld",
44190- atomic_read(&tcon->num_writes),
44191+ atomic_read_unchecked(&tcon->num_writes),
44192 (long long)(tcon->bytes_written));
44193 seq_printf(m, "\nFlushes: %d",
44194- atomic_read(&tcon->num_flushes));
44195+ atomic_read_unchecked(&tcon->num_flushes));
44196 seq_printf(m, "\nLocks: %d HardLinks: %d "
44197 "Symlinks: %d",
44198- atomic_read(&tcon->num_locks),
44199- atomic_read(&tcon->num_hardlinks),
44200- atomic_read(&tcon->num_symlinks));
44201+ atomic_read_unchecked(&tcon->num_locks),
44202+ atomic_read_unchecked(&tcon->num_hardlinks),
44203+ atomic_read_unchecked(&tcon->num_symlinks));
44204 seq_printf(m, "\nOpens: %d Closes: %d "
44205 "Deletes: %d",
44206- atomic_read(&tcon->num_opens),
44207- atomic_read(&tcon->num_closes),
44208- atomic_read(&tcon->num_deletes));
44209+ atomic_read_unchecked(&tcon->num_opens),
44210+ atomic_read_unchecked(&tcon->num_closes),
44211+ atomic_read_unchecked(&tcon->num_deletes));
44212 seq_printf(m, "\nPosix Opens: %d "
44213 "Posix Mkdirs: %d",
44214- atomic_read(&tcon->num_posixopens),
44215- atomic_read(&tcon->num_posixmkdirs));
44216+ atomic_read_unchecked(&tcon->num_posixopens),
44217+ atomic_read_unchecked(&tcon->num_posixmkdirs));
44218 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
44219- atomic_read(&tcon->num_mkdirs),
44220- atomic_read(&tcon->num_rmdirs));
44221+ atomic_read_unchecked(&tcon->num_mkdirs),
44222+ atomic_read_unchecked(&tcon->num_rmdirs));
44223 seq_printf(m, "\nRenames: %d T2 Renames %d",
44224- atomic_read(&tcon->num_renames),
44225- atomic_read(&tcon->num_t2renames));
44226+ atomic_read_unchecked(&tcon->num_renames),
44227+ atomic_read_unchecked(&tcon->num_t2renames));
44228 seq_printf(m, "\nFindFirst: %d FNext %d "
44229 "FClose %d",
44230- atomic_read(&tcon->num_ffirst),
44231- atomic_read(&tcon->num_fnext),
44232- atomic_read(&tcon->num_fclose));
44233+ atomic_read_unchecked(&tcon->num_ffirst),
44234+ atomic_read_unchecked(&tcon->num_fnext),
44235+ atomic_read_unchecked(&tcon->num_fclose));
44236 }
44237 }
44238 }
fe2de317 44239diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
5e856224 44240index 6ee1cb4..8443157 100644
fe2de317
MT
44241--- a/fs/cifs/cifsfs.c
44242+++ b/fs/cifs/cifsfs.c
4c928ab7 44243@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
15a11c5b
MT
44244 cifs_req_cachep = kmem_cache_create("cifs_request",
44245 CIFSMaxBufSize +
44246 MAX_CIFS_HDR_SIZE, 0,
44247- SLAB_HWCACHE_ALIGN, NULL);
44248+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
44249 if (cifs_req_cachep == NULL)
44250 return -ENOMEM;
44251
4c928ab7 44252@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
15a11c5b
MT
44253 efficient to alloc 1 per page off the slab compared to 17K (5page)
44254 alloc of large cifs buffers even when page debugging is on */
44255 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
44256- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
44257+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
44258 NULL);
44259 if (cifs_sm_req_cachep == NULL) {
44260 mempool_destroy(cifs_req_poolp);
4c928ab7 44261@@ -1101,8 +1101,8 @@ init_cifs(void)
15a11c5b
MT
44262 atomic_set(&bufAllocCount, 0);
44263 atomic_set(&smBufAllocCount, 0);
44264 #ifdef CONFIG_CIFS_STATS2
44265- atomic_set(&totBufAllocCount, 0);
44266- atomic_set(&totSmBufAllocCount, 0);
44267+ atomic_set_unchecked(&totBufAllocCount, 0);
44268+ atomic_set_unchecked(&totSmBufAllocCount, 0);
44269 #endif /* CONFIG_CIFS_STATS2 */
44270
44271 atomic_set(&midCount, 0);
fe2de317 44272diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5e856224 44273index d47d20a..77e8b33 100644
fe2de317
MT
44274--- a/fs/cifs/cifsglob.h
44275+++ b/fs/cifs/cifsglob.h
5e856224 44276@@ -388,28 +388,28 @@ struct cifs_tcon {
8308f9c9
MT
44277 __u16 Flags; /* optional support bits */
44278 enum statusEnum tidStatus;
44279 #ifdef CONFIG_CIFS_STATS
44280- atomic_t num_smbs_sent;
44281- atomic_t num_writes;
44282- atomic_t num_reads;
44283- atomic_t num_flushes;
44284- atomic_t num_oplock_brks;
44285- atomic_t num_opens;
44286- atomic_t num_closes;
44287- atomic_t num_deletes;
44288- atomic_t num_mkdirs;
44289- atomic_t num_posixopens;
44290- atomic_t num_posixmkdirs;
44291- atomic_t num_rmdirs;
44292- atomic_t num_renames;
44293- atomic_t num_t2renames;
44294- atomic_t num_ffirst;
44295- atomic_t num_fnext;
44296- atomic_t num_fclose;
44297- atomic_t num_hardlinks;
44298- atomic_t num_symlinks;
44299- atomic_t num_locks;
44300- atomic_t num_acl_get;
44301- atomic_t num_acl_set;
44302+ atomic_unchecked_t num_smbs_sent;
44303+ atomic_unchecked_t num_writes;
44304+ atomic_unchecked_t num_reads;
44305+ atomic_unchecked_t num_flushes;
44306+ atomic_unchecked_t num_oplock_brks;
44307+ atomic_unchecked_t num_opens;
44308+ atomic_unchecked_t num_closes;
44309+ atomic_unchecked_t num_deletes;
44310+ atomic_unchecked_t num_mkdirs;
44311+ atomic_unchecked_t num_posixopens;
44312+ atomic_unchecked_t num_posixmkdirs;
44313+ atomic_unchecked_t num_rmdirs;
44314+ atomic_unchecked_t num_renames;
44315+ atomic_unchecked_t num_t2renames;
44316+ atomic_unchecked_t num_ffirst;
44317+ atomic_unchecked_t num_fnext;
44318+ atomic_unchecked_t num_fclose;
44319+ atomic_unchecked_t num_hardlinks;
44320+ atomic_unchecked_t num_symlinks;
44321+ atomic_unchecked_t num_locks;
44322+ atomic_unchecked_t num_acl_get;
44323+ atomic_unchecked_t num_acl_set;
44324 #ifdef CONFIG_CIFS_STATS2
44325 unsigned long long time_writes;
44326 unsigned long long time_reads;
5e856224 44327@@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
8308f9c9
MT
44328 }
44329
44330 #ifdef CONFIG_CIFS_STATS
44331-#define cifs_stats_inc atomic_inc
44332+#define cifs_stats_inc atomic_inc_unchecked
44333
15a11c5b 44334 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
8308f9c9 44335 unsigned int bytes)
5e856224 44336@@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
15a11c5b
MT
44337 /* Various Debug counters */
44338 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
44339 #ifdef CONFIG_CIFS_STATS2
44340-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
44341-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
44342+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
44343+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
44344 #endif
44345 GLOBAL_EXTERN atomic_t smBufAllocCount;
44346 GLOBAL_EXTERN atomic_t midCount;
fe2de317 44347diff --git a/fs/cifs/link.c b/fs/cifs/link.c
4c928ab7 44348index 6b0e064..94e6c3c 100644
fe2de317
MT
44349--- a/fs/cifs/link.c
44350+++ b/fs/cifs/link.c
4c928ab7 44351@@ -600,7 +600,7 @@ symlink_exit:
58c5fc13
MT
44352
44353 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
44354 {
44355- char *p = nd_get_link(nd);
44356+ const char *p = nd_get_link(nd);
44357 if (!IS_ERR(p))
44358 kfree(p);
44359 }
fe2de317 44360diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
4c928ab7 44361index 703ef5c..2a44ed5 100644
fe2de317
MT
44362--- a/fs/cifs/misc.c
44363+++ b/fs/cifs/misc.c
15a11c5b
MT
44364@@ -156,7 +156,7 @@ cifs_buf_get(void)
44365 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
44366 atomic_inc(&bufAllocCount);
44367 #ifdef CONFIG_CIFS_STATS2
44368- atomic_inc(&totBufAllocCount);
44369+ atomic_inc_unchecked(&totBufAllocCount);
44370 #endif /* CONFIG_CIFS_STATS2 */
44371 }
44372
44373@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
44374 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
44375 atomic_inc(&smBufAllocCount);
44376 #ifdef CONFIG_CIFS_STATS2
44377- atomic_inc(&totSmBufAllocCount);
44378+ atomic_inc_unchecked(&totSmBufAllocCount);
44379 #endif /* CONFIG_CIFS_STATS2 */
44380
44381 }
fe2de317
MT
44382diff --git a/fs/coda/cache.c b/fs/coda/cache.c
44383index 6901578..d402eb5 100644
44384--- a/fs/coda/cache.c
44385+++ b/fs/coda/cache.c
8308f9c9
MT
44386@@ -24,7 +24,7 @@
44387 #include "coda_linux.h"
44388 #include "coda_cache.h"
44389
44390-static atomic_t permission_epoch = ATOMIC_INIT(0);
44391+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44392
44393 /* replace or extend an acl cache hit */
44394 void coda_cache_enter(struct inode *inode, int mask)
fe2de317 44395@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
8308f9c9
MT
44396 struct coda_inode_info *cii = ITOC(inode);
44397
44398 spin_lock(&cii->c_lock);
44399- cii->c_cached_epoch = atomic_read(&permission_epoch);
44400+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44401 if (cii->c_uid != current_fsuid()) {
44402 cii->c_uid = current_fsuid();
44403 cii->c_cached_perm = mask;
fe2de317 44404@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
8308f9c9
MT
44405 {
44406 struct coda_inode_info *cii = ITOC(inode);
44407 spin_lock(&cii->c_lock);
44408- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44409+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44410 spin_unlock(&cii->c_lock);
44411 }
44412
44413 /* remove all acl caches */
44414 void coda_cache_clear_all(struct super_block *sb)
44415 {
44416- atomic_inc(&permission_epoch);
44417+ atomic_inc_unchecked(&permission_epoch);
44418 }
44419
44420
fe2de317 44421@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
8308f9c9
MT
44422 spin_lock(&cii->c_lock);
44423 hit = (mask & cii->c_cached_perm) == mask &&
44424 cii->c_uid == current_fsuid() &&
44425- cii->c_cached_epoch == atomic_read(&permission_epoch);
44426+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44427 spin_unlock(&cii->c_lock);
44428
44429 return hit;
fe2de317 44430diff --git a/fs/compat.c b/fs/compat.c
5e856224 44431index 07880ba..3fb2862 100644
fe2de317
MT
44432--- a/fs/compat.c
44433+++ b/fs/compat.c
5e856224 44434@@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
6e9df6a3
MT
44435
44436 set_fs(KERNEL_DS);
44437 /* The __user pointer cast is valid because of the set_fs() */
44438- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44439+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44440 set_fs(oldfs);
44441 /* truncating is ok because it's a user address */
44442 if (!ret)
5e856224 44443@@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
6892158b
MT
44444 goto out;
44445
44446 ret = -EINVAL;
44447- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44448+ if (nr_segs > UIO_MAXIOV)
44449 goto out;
44450 if (nr_segs > fast_segs) {
44451 ret = -ENOMEM;
5e856224 44452@@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
58c5fc13 44453
bc901d79
MT
44454 struct compat_readdir_callback {
44455 struct compat_old_linux_dirent __user *dirent;
44456+ struct file * file;
44457 int result;
44458 };
44459
5e856224 44460@@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
bc901d79
MT
44461 buf->result = -EOVERFLOW;
44462 return -EOVERFLOW;
44463 }
44464+
44465+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44466+ return 0;
44467+
44468 buf->result++;
44469 dirent = buf->dirent;
44470 if (!access_ok(VERIFY_WRITE, dirent,
5e856224 44471@@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
bc901d79
MT
44472
44473 buf.result = 0;
44474 buf.dirent = dirent;
44475+ buf.file = file;
44476
44477 error = vfs_readdir(file, compat_fillonedir, &buf);
44478 if (buf.result)
5e856224 44479@@ -901,6 +907,7 @@ struct compat_linux_dirent {
bc901d79
MT
44480 struct compat_getdents_callback {
44481 struct compat_linux_dirent __user *current_dir;
44482 struct compat_linux_dirent __user *previous;
44483+ struct file * file;
44484 int count;
44485 int error;
44486 };
5e856224 44487@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
bc901d79
MT
44488 buf->error = -EOVERFLOW;
44489 return -EOVERFLOW;
44490 }
44491+
44492+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44493+ return 0;
44494+
44495 dirent = buf->previous;
44496 if (dirent) {
44497 if (__put_user(offset, &dirent->d_off))
5e856224 44498@@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44499 buf.previous = NULL;
44500 buf.count = count;
44501 buf.error = 0;
44502+ buf.file = file;
44503
44504 error = vfs_readdir(file, compat_filldir, &buf);
44505 if (error >= 0)
5e856224 44506@@ -990,6 +1002,7 @@ out:
44507 struct compat_getdents_callback64 {
44508 struct linux_dirent64 __user *current_dir;
44509 struct linux_dirent64 __user *previous;
44510+ struct file * file;
44511 int count;
44512 int error;
44513 };
5e856224 44514@@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44515 buf->error = -EINVAL; /* only used if we fail.. */
44516 if (reclen > buf->count)
44517 return -EINVAL;
44518+
44519+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44520+ return 0;
44521+
44522 dirent = buf->previous;
44523
44524 if (dirent) {
5e856224 44525@@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44526 buf.previous = NULL;
44527 buf.count = count;
44528 buf.error = 0;
44529+ buf.file = file;
44530
44531 error = vfs_readdir(file, compat_filldir64, &buf);
44532 if (error >= 0)
44533 error = buf.error;
44534 lastdirent = buf.previous;
44535 if (lastdirent) {
44536- typeof(lastdirent->d_off) d_off = file->f_pos;
44537+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44538 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44539 error = -EFAULT;
44540 else
44541diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44542index 112e45a..b59845b 100644
44543--- a/fs/compat_binfmt_elf.c
44544+++ b/fs/compat_binfmt_elf.c
44545@@ -30,11 +30,13 @@
44546 #undef elf_phdr
44547 #undef elf_shdr
44548 #undef elf_note
44549+#undef elf_dyn
44550 #undef elf_addr_t
44551 #define elfhdr elf32_hdr
44552 #define elf_phdr elf32_phdr
44553 #define elf_shdr elf32_shdr
44554 #define elf_note elf32_note
44555+#define elf_dyn Elf32_Dyn
44556 #define elf_addr_t Elf32_Addr
44557
44558 /*
44559diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
5e856224 44560index a26bea1..ae23e72 100644
44561--- a/fs/compat_ioctl.c
44562+++ b/fs/compat_ioctl.c
5e856224 44563@@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44564
44565 err = get_user(palp, &up->palette);
44566 err |= get_user(length, &up->length);
44567+ if (err)
44568+ return -EFAULT;
44569
44570 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44571 err = put_user(compat_ptr(palp), &up_native->palette);
5e856224 44572@@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44573 return -EFAULT;
44574 if (__get_user(udata, &ss32->iomem_base))
44575 return -EFAULT;
44576- ss.iomem_base = compat_ptr(udata);
44577+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44578 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44579 __get_user(ss.port_high, &ss32->port_high))
44580 return -EFAULT;
5e856224 44581@@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
44582 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44583 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44584 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44585- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44586+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44587 return -EFAULT;
44588
44589 return ioctl_preallocate(file, p);
5e856224 44590@@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44591 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44592 {
44593 unsigned int a, b;
44594- a = *(unsigned int *)p;
44595- b = *(unsigned int *)q;
44596+ a = *(const unsigned int *)p;
44597+ b = *(const unsigned int *)q;
44598 if (a > b)
44599 return 1;
44600 if (a < b)
fe2de317 44601diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
5e856224 44602index 5ddd7eb..c18bf04 100644
44603--- a/fs/configfs/dir.c
44604+++ b/fs/configfs/dir.c
44605@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44606 }
44607 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44608 struct configfs_dirent *next;
44609- const char * name;
44610+ const unsigned char * name;
44611+ char d_name[sizeof(next->s_dentry->d_iname)];
44612 int len;
44613 struct inode *inode = NULL;
44614
fe2de317 44615@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44616 continue;
44617
44618 name = configfs_get_name(next);
44619- len = strlen(name);
44620+ if (next->s_dentry && name == next->s_dentry->d_iname) {
44621+ len = next->s_dentry->d_name.len;
44622+ memcpy(d_name, name, len);
44623+ name = d_name;
44624+ } else
44625+ len = strlen(name);
44626
44627 /*
44628 * We'll have a dentry and an inode for
44629diff --git a/fs/configfs/file.c b/fs/configfs/file.c
44630index 2b6cb23..d76e879 100644
44631--- a/fs/configfs/file.c
44632+++ b/fs/configfs/file.c
44633@@ -135,6 +135,8 @@ out:
44634 */
44635
44636 static int
44637+fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count) __size_overflow(3);
44638+static int
44639 fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
44640 {
44641 int error;
fe2de317 44642diff --git a/fs/dcache.c b/fs/dcache.c
5e856224 44643index 2576d14..0cec38d 100644
44644--- a/fs/dcache.c
44645+++ b/fs/dcache.c
44646@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
44647 static struct hlist_bl_head *dentry_hashtable __read_mostly;
44648
44649 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
44650- unsigned long hash)
44651+ unsigned int hash)
44652 {
44653- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
44654- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
44655+ hash += (unsigned long) parent / L1_CACHE_BYTES;
44656+ hash = hash + (hash >> D_HASHBITS);
44657 return dentry_hashtable + (hash & D_HASHMASK);
44658 }
44659
44660@@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
44661 mempages -= reserve;
44662
44663 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44664- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44665+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44666
44667 dcache_init();
44668 inode_init();
4c928ab7 44669diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
5e856224 44670index 956d5dd..e755e04 100644
44671--- a/fs/debugfs/inode.c
44672+++ b/fs/debugfs/inode.c
44673@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44674 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44675 {
44676 return debugfs_create_file(name,
44677+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44678+ S_IFDIR | S_IRWXU,
44679+#else
44680 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44681+#endif
44682 parent, NULL, NULL);
44683 }
44684 EXPORT_SYMBOL_GPL(debugfs_create_dir);
fe2de317 44685diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
5e856224 44686index ab35b11..b30af66 100644
44687--- a/fs/ecryptfs/inode.c
44688+++ b/fs/ecryptfs/inode.c
5e856224 44689@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44690 old_fs = get_fs();
44691 set_fs(get_ds());
44692 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44693- (char __user *)lower_buf,
6e9df6a3 44694+ (char __force_user *)lower_buf,
44695 lower_bufsiz);
44696 set_fs(old_fs);
df50ba0c 44697 if (rc < 0)
5e856224 44698@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44699 }
44700 old_fs = get_fs();
44701 set_fs(get_ds());
44702- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
6e9df6a3 44703+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
44704 set_fs(old_fs);
44705 if (rc < 0) {
44706 kfree(buf);
5e856224 44707@@ -733,7 +733,7 @@ out:
44708 static void
44709 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44710 {
44711- char *buf = nd_get_link(nd);
44712+ const char *buf = nd_get_link(nd);
44713 if (!IS_ERR(buf)) {
44714 /* Free the char* */
44715 kfree(buf);
fe2de317 44716diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
5e856224 44717index 3a06f40..f7af544 100644
44718--- a/fs/ecryptfs/miscdev.c
44719+++ b/fs/ecryptfs/miscdev.c
5e856224 44720@@ -345,7 +345,7 @@ check_list:
ae4e228f 44721 goto out_unlock_msg_ctx;
5e856224 44722 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44723 if (msg_ctx->msg) {
44724- if (copy_to_user(&buf[i], packet_length, packet_length_size))
44725+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44726 goto out_unlock_msg_ctx;
44727 i += packet_length_size;
44728 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
fe2de317 44729diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
5e856224 44730index b2a34a1..162fa69 100644
44731--- a/fs/ecryptfs/read_write.c
44732+++ b/fs/ecryptfs/read_write.c
44733@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44734 return -EIO;
44735 fs_save = get_fs();
44736 set_fs(get_ds());
44737- rc = vfs_write(lower_file, data, size, &offset);
44738+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44739 set_fs(fs_save);
44740 mark_inode_dirty_sync(ecryptfs_inode);
44741 return rc;
4c928ab7 44742@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44743 return -EIO;
44744 fs_save = get_fs();
44745 set_fs(get_ds());
44746- rc = vfs_read(lower_file, data, size, &offset);
44747+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44748 set_fs(fs_save);
44749 return rc;
44750 }
fe2de317 44751diff --git a/fs/exec.c b/fs/exec.c
5e856224 44752index ae42277..32c9035 100644
44753--- a/fs/exec.c
44754+++ b/fs/exec.c
5e856224 44755@@ -55,6 +55,13 @@
ae4e228f 44756 #include <linux/pipe_fs_i.h>
bc901d79 44757 #include <linux/oom.h>
15a11c5b 44758 #include <linux/compat.h>
44759+#include <linux/random.h>
44760+#include <linux/seq_file.h>
44761+
44762+#ifdef CONFIG_PAX_REFCOUNT
44763+#include <linux/kallsyms.h>
44764+#include <linux/kdebug.h>
44765+#endif
44766
44767 #include <asm/uaccess.h>
44768 #include <asm/mmu_context.h>
44769@@ -63,6 +70,15 @@
44770 #include <trace/events/task.h>
44771 #include "internal.h"
44772
44773+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
44774+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
44775+#endif
44776+
44777+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44778+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44779+EXPORT_SYMBOL(pax_set_initial_flags_func);
44780+#endif
44781+
44782 int core_uses_pid;
44783 char core_pattern[CORENAME_MAX_SIZE] = "core";
ae4e228f 44784 unsigned int core_pipe_limit;
5e856224 44785@@ -72,7 +88,7 @@ struct core_name {
44786 char *corename;
44787 int used, size;
44788 };
44789-static atomic_t call_count = ATOMIC_INIT(1);
44790+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44791
44792 /* The maximal length of core_pattern is also specified in sysctl.c */
44793
5e856224 44794@@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44795 int write)
44796 {
44797 struct page *page;
44798- int ret;
44799
44800-#ifdef CONFIG_STACK_GROWSUP
44801- if (write) {
15a11c5b 44802- ret = expand_downwards(bprm->vma, pos);
44803- if (ret < 0)
44804- return NULL;
44805- }
44806-#endif
44807- ret = get_user_pages(current, bprm->mm, pos,
44808- 1, write, 1, &page, NULL);
44809- if (ret <= 0)
15a11c5b 44810+ if (0 > expand_downwards(bprm->vma, pos))
44811+ return NULL;
44812+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44813 return NULL;
44814
44815 if (write) {
5e856224 44816@@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44817 if (size <= ARG_MAX)
44818 return page;
44819
44820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44821+ // only allow 512KB for argv+env on suid/sgid binaries
44822+ // to prevent easy ASLR exhaustion
44823+ if (((bprm->cred->euid != current_euid()) ||
44824+ (bprm->cred->egid != current_egid())) &&
44825+ (size > (512 * 1024))) {
44826+ put_page(page);
44827+ return NULL;
44828+ }
44829+#endif
44830+
44831 /*
44832 * Limit to 1/4-th the stack size for the argv+env strings.
44833 * This ensures that:
5e856224 44834@@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44835 vma->vm_end = STACK_TOP_MAX;
44836 vma->vm_start = vma->vm_end - PAGE_SIZE;
57199397 44837 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44838+
44839+#ifdef CONFIG_PAX_SEGMEXEC
44840+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44841+#endif
44842+
44843 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
df50ba0c 44844 INIT_LIST_HEAD(&vma->anon_vma_chain);
bc901d79 44845
5e856224 44846@@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44847 mm->stack_vm = mm->total_vm = 1;
44848 up_write(&mm->mmap_sem);
44849 bprm->p = vma->vm_end - sizeof(void *);
44850+
44851+#ifdef CONFIG_PAX_RANDUSTACK
44852+ if (randomize_va_space)
4c928ab7 44853+ bprm->p ^= random32() & ~PAGE_MASK;
44854+#endif
44855+
44856 return 0;
44857 err:
44858 up_write(&mm->mmap_sem);
5e856224 44859@@ -398,19 +428,7 @@ err:
44860 return err;
44861 }
44862
44863-struct user_arg_ptr {
44864-#ifdef CONFIG_COMPAT
44865- bool is_compat;
44866-#endif
44867- union {
44868- const char __user *const __user *native;
44869-#ifdef CONFIG_COMPAT
44870- compat_uptr_t __user *compat;
44871-#endif
44872- } ptr;
44873-};
44874-
44875-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44876+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44877 {
44878 const char __user *native;
44879
5e856224 44880@@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44881 compat_uptr_t compat;
44882
44883 if (get_user(compat, argv.ptr.compat + nr))
44884- return ERR_PTR(-EFAULT);
44885+ return (const char __force_user *)ERR_PTR(-EFAULT);
44886
44887 return compat_ptr(compat);
44888 }
44889 #endif
44890
44891 if (get_user(native, argv.ptr.native + nr))
44892- return ERR_PTR(-EFAULT);
44893+ return (const char __force_user *)ERR_PTR(-EFAULT);
44894
44895 return native;
44896 }
5e856224 44897@@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44898 if (!p)
44899 break;
44900
44901- if (IS_ERR(p))
44902+ if (IS_ERR((const char __force_kernel *)p))
44903 return -EFAULT;
44904
44905 if (i++ >= max)
5e856224 44906@@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44907
44908 ret = -EFAULT;
44909 str = get_user_arg_ptr(argv, argc);
44910- if (IS_ERR(str))
44911+ if (IS_ERR((const char __force_kernel *)str))
44912 goto out;
44913
44914 len = strnlen_user(str, MAX_ARG_STRLEN);
5e856224 44915@@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44916 int r;
44917 mm_segment_t oldfs = get_fs();
44918 struct user_arg_ptr argv = {
44919- .ptr.native = (const char __user *const __user *)__argv,
6e9df6a3 44920+ .ptr.native = (const char __force_user *const __force_user *)__argv,
44921 };
44922
ae4e228f 44923 set_fs(KERNEL_DS);
5e856224 44924@@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58c5fc13 44925 unsigned long new_end = old_end - shift;
15a11c5b 44926 struct mmu_gather tlb;
44927
44928- BUG_ON(new_start > new_end);
44929+ if (new_start >= new_end || new_start < mmap_min_addr)
bc901d79 44930+ return -ENOMEM;
44931
44932 /*
44933 * ensure there are no vmas between where we want to go
5e856224 44934@@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44935 if (vma != find_vma(mm, new_start))
44936 return -EFAULT;
44937
44938+#ifdef CONFIG_PAX_SEGMEXEC
44939+ BUG_ON(pax_find_mirror_vma(vma));
44940+#endif
44941+
44942 /*
44943 * cover the whole range: [new_start, old_end)
44944 */
5e856224 44945@@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44946 stack_top = arch_align_stack(stack_top);
44947 stack_top = PAGE_ALIGN(stack_top);
44948
44949- if (unlikely(stack_top < mmap_min_addr) ||
44950- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44951- return -ENOMEM;
44952-
44953 stack_shift = vma->vm_end - stack_top;
44954
44955 bprm->p -= stack_shift;
5e856224 44956@@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44957 bprm->exec -= stack_shift;
44958
44959 down_write(&mm->mmap_sem);
44960+
44961+ /* Move stack pages down in memory. */
44962+ if (stack_shift) {
44963+ ret = shift_arg_pages(vma, stack_shift);
44964+ if (ret)
44965+ goto out_unlock;
44966+ }
44967+
44968 vm_flags = VM_STACK_FLAGS;
44969
44970+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44971+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44972+ vm_flags &= ~VM_EXEC;
44973+
44974+#ifdef CONFIG_PAX_MPROTECT
44975+ if (mm->pax_flags & MF_PAX_MPROTECT)
44976+ vm_flags &= ~VM_MAYEXEC;
44977+#endif
44978+
44979+ }
44980+#endif
44981+
44982 /*
44983 * Adjust stack execute permissions; explicitly enable for
44984 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
5e856224 44985@@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44986 goto out_unlock;
44987 BUG_ON(prev != vma);
44988
44989- /* Move stack pages down in memory. */
44990- if (stack_shift) {
44991- ret = shift_arg_pages(vma, stack_shift);
44992- if (ret)
44993- goto out_unlock;
44994- }
44995-
44996 /* mprotect_fixup is overkill to remove the temporary stack flags */
44997 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44998
5e856224 44999@@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
45000 old_fs = get_fs();
45001 set_fs(get_ds());
45002 /* The cast to a user pointer is valid due to the set_fs() */
45003- result = vfs_read(file, (void __user *)addr, count, &pos);
6e9df6a3 45004+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
45005 set_fs(old_fs);
45006 return result;
45007 }
5e856224 45008@@ -1255,7 +1287,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
45009 }
45010 rcu_read_unlock();
45011
45012- if (p->fs->users > n_fs) {
45013+ if (atomic_read(&p->fs->users) > n_fs) {
45014 bprm->unsafe |= LSM_UNSAFE_SHARE;
45015 } else {
45016 res = -EAGAIN;
5e856224 45017@@ -1450,6 +1482,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
45018
45019 EXPORT_SYMBOL(search_binary_handler);
45020
45021+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45022+static DEFINE_PER_CPU(u64, exec_counter);
45023+static int __init init_exec_counters(void)
45024+{
45025+ unsigned int cpu;
45026+
45027+ for_each_possible_cpu(cpu) {
45028+ per_cpu(exec_counter, cpu) = (u64)cpu;
45029+ }
45030+
45031+ return 0;
45032+}
45033+early_initcall(init_exec_counters);
45034+static inline void increment_exec_counter(void)
45035+{
45036+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
45037+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
45038+}
45039+#else
45040+static inline void increment_exec_counter(void) {}
45041+#endif
45042+
45043 /*
45044 * sys_execve() executes a new program.
45045 */
5e856224 45046@@ -1458,6 +1512,11 @@ static int do_execve_common(const char *filename,
45047 struct user_arg_ptr envp,
45048 struct pt_regs *regs)
45049 {
45050+#ifdef CONFIG_GRKERNSEC
45051+ struct file *old_exec_file;
45052+ struct acl_subject_label *old_acl;
45053+ struct rlimit old_rlim[RLIM_NLIMITS];
45054+#endif
45055 struct linux_binprm *bprm;
45056 struct file *file;
45057 struct files_struct *displaced;
5e856224 45058@@ -1465,6 +1524,8 @@ static int do_execve_common(const char *filename,
15a11c5b 45059 int retval;
45060 const struct cred *cred = current_cred();
45061
45062+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
45063+
45064 /*
45065 * We move the actual failure in case of RLIMIT_NPROC excess from
45066 * set*uid() to execve() because too many poorly written programs
5e856224 45067@@ -1505,12 +1566,27 @@ static int do_execve_common(const char *filename,
45068 if (IS_ERR(file))
45069 goto out_unmark;
45070
45071+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
45072+ retval = -EPERM;
45073+ goto out_file;
45074+ }
45075+
45076 sched_exec();
45077
45078 bprm->file = file;
45079 bprm->filename = filename;
45080 bprm->interp = filename;
45081
45082+ if (gr_process_user_ban()) {
45083+ retval = -EPERM;
45084+ goto out_file;
45085+ }
45086+
45087+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
45088+ retval = -EACCES;
45089+ goto out_file;
45090+ }
45091+
45092 retval = bprm_mm_init(bprm);
45093 if (retval)
45094 goto out_file;
5e856224 45095@@ -1527,24 +1603,65 @@ static int do_execve_common(const char *filename,
45096 if (retval < 0)
45097 goto out;
45098
45099+#ifdef CONFIG_GRKERNSEC
45100+ old_acl = current->acl;
45101+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
45102+ old_exec_file = current->exec_file;
45103+ get_file(file);
45104+ current->exec_file = file;
45105+#endif
45106+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45107+ /* limit suid stack to 8MB
45108+ we saved the old limits above and will restore them if this exec fails
45109+ */
45110+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
45111+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
45112+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
45113+#endif
45114+
45115+ if (!gr_tpe_allow(file)) {
45116+ retval = -EACCES;
45117+ goto out_fail;
45118+ }
45119+
45120+ if (gr_check_crash_exec(file)) {
45121+ retval = -EACCES;
45122+ goto out_fail;
45123+ }
45124+
45125+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
4c928ab7 45126+ bprm->unsafe);
45127+ if (retval < 0)
45128+ goto out_fail;
45129+
45130 retval = copy_strings_kernel(1, &bprm->filename, bprm);
45131 if (retval < 0)
45132- goto out;
45133+ goto out_fail;
45134
45135 bprm->exec = bprm->p;
45136 retval = copy_strings(bprm->envc, envp, bprm);
45137 if (retval < 0)
45138- goto out;
45139+ goto out_fail;
45140
45141 retval = copy_strings(bprm->argc, argv, bprm);
45142 if (retval < 0)
45143- goto out;
45144+ goto out_fail;
45145+
45146+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
45147+
45148+ gr_handle_exec_args(bprm, argv);
45149
45150 retval = search_binary_handler(bprm,regs);
45151 if (retval < 0)
45152- goto out;
45153+ goto out_fail;
45154+#ifdef CONFIG_GRKERNSEC
45155+ if (old_exec_file)
45156+ fput(old_exec_file);
45157+#endif
45158
df50ba0c 45159 /* execve succeeded */
45160+
45161+ increment_exec_counter();
df50ba0c 45162 current->fs->in_exec = 0;
45163 current->in_execve = 0;
45164 acct_update_integrals(current);
5e856224 45165@@ -1553,6 +1670,14 @@ static int do_execve_common(const char *filename,
45166 put_files_struct(displaced);
45167 return retval;
45168
45169+out_fail:
45170+#ifdef CONFIG_GRKERNSEC
45171+ current->acl = old_acl;
45172+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
45173+ fput(current->exec_file);
45174+ current->exec_file = old_exec_file;
45175+#endif
45176+
45177 out:
45178 if (bprm->mm) {
45179 acct_arg_size(bprm, 0);
5e856224 45180@@ -1626,7 +1751,7 @@ static int expand_corename(struct core_name *cn)
45181 {
45182 char *old_corename = cn->corename;
45183
45184- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
45185+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
45186 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
45187
45188 if (!cn->corename) {
5e856224 45189@@ -1723,7 +1848,7 @@ static int format_corename(struct core_name *cn, long signr)
45190 int pid_in_pattern = 0;
45191 int err = 0;
45192
45193- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
45194+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
45195 cn->corename = kmalloc(cn->size, GFP_KERNEL);
45196 cn->used = 0;
45197
5e856224 45198@@ -1820,6 +1945,228 @@ out:
45199 return ispipe;
45200 }
45201
45202+int pax_check_flags(unsigned long *flags)
45203+{
45204+ int retval = 0;
45205+
45206+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
45207+ if (*flags & MF_PAX_SEGMEXEC)
45208+ {
45209+ *flags &= ~MF_PAX_SEGMEXEC;
45210+ retval = -EINVAL;
45211+ }
45212+#endif
45213+
45214+ if ((*flags & MF_PAX_PAGEEXEC)
45215+
45216+#ifdef CONFIG_PAX_PAGEEXEC
45217+ && (*flags & MF_PAX_SEGMEXEC)
45218+#endif
45219+
45220+ )
45221+ {
45222+ *flags &= ~MF_PAX_PAGEEXEC;
45223+ retval = -EINVAL;
45224+ }
45225+
45226+ if ((*flags & MF_PAX_MPROTECT)
45227+
45228+#ifdef CONFIG_PAX_MPROTECT
45229+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45230+#endif
45231+
45232+ )
45233+ {
45234+ *flags &= ~MF_PAX_MPROTECT;
45235+ retval = -EINVAL;
45236+ }
45237+
45238+ if ((*flags & MF_PAX_EMUTRAMP)
45239+
45240+#ifdef CONFIG_PAX_EMUTRAMP
45241+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45242+#endif
45243+
45244+ )
45245+ {
45246+ *flags &= ~MF_PAX_EMUTRAMP;
45247+ retval = -EINVAL;
45248+ }
45249+
45250+ return retval;
45251+}
45252+
45253+EXPORT_SYMBOL(pax_check_flags);
45254+
45255+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45256+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
45257+{
45258+ struct task_struct *tsk = current;
45259+ struct mm_struct *mm = current->mm;
45260+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
45261+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
45262+ char *path_exec = NULL;
45263+ char *path_fault = NULL;
45264+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
45265+
45266+ if (buffer_exec && buffer_fault) {
45267+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
45268+
45269+ down_read(&mm->mmap_sem);
45270+ vma = mm->mmap;
45271+ while (vma && (!vma_exec || !vma_fault)) {
45272+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
45273+ vma_exec = vma;
45274+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
45275+ vma_fault = vma;
45276+ vma = vma->vm_next;
45277+ }
45278+ if (vma_exec) {
45279+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
45280+ if (IS_ERR(path_exec))
45281+ path_exec = "<path too long>";
45282+ else {
45283+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
45284+ if (path_exec) {
45285+ *path_exec = 0;
45286+ path_exec = buffer_exec;
45287+ } else
45288+ path_exec = "<path too long>";
45289+ }
45290+ }
45291+ if (vma_fault) {
45292+ start = vma_fault->vm_start;
45293+ end = vma_fault->vm_end;
45294+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
45295+ if (vma_fault->vm_file) {
45296+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
45297+ if (IS_ERR(path_fault))
45298+ path_fault = "<path too long>";
45299+ else {
45300+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
45301+ if (path_fault) {
45302+ *path_fault = 0;
45303+ path_fault = buffer_fault;
45304+ } else
45305+ path_fault = "<path too long>";
45306+ }
45307+ } else
45308+ path_fault = "<anonymous mapping>";
45309+ }
45310+ up_read(&mm->mmap_sem);
45311+ }
45312+ if (tsk->signal->curr_ip)
ae4e228f 45313+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
45314+ else
45315+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
45316+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
45317+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
45318+ task_uid(tsk), task_euid(tsk), pc, sp);
45319+ free_page((unsigned long)buffer_exec);
45320+ free_page((unsigned long)buffer_fault);
6e9df6a3 45321+ pax_report_insns(regs, pc, sp);
45322+ do_coredump(SIGKILL, SIGKILL, regs);
45323+}
45324+#endif
45325+
45326+#ifdef CONFIG_PAX_REFCOUNT
45327+void pax_report_refcount_overflow(struct pt_regs *regs)
45328+{
45329+ if (current->signal->curr_ip)
45330+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45331+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
45332+ else
45333+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45334+ current->comm, task_pid_nr(current), current_uid(), current_euid());
45335+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
45336+ show_regs(regs);
ae4e228f 45337+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
45338+}
45339+#endif
45340+
45341+#ifdef CONFIG_PAX_USERCOPY
45342+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
45343+int object_is_on_stack(const void *obj, unsigned long len)
45344+{
45345+ const void * const stack = task_stack_page(current);
45346+ const void * const stackend = stack + THREAD_SIZE;
45347+
57199397 45348+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45349+ const void *frame = NULL;
45350+ const void *oldframe;
45351+#endif
45352+
45353+ if (obj + len < obj)
45354+ return -1;
57199397 45355+
45356+ if (obj + len <= stack || stackend <= obj)
45357+ return 0;
57199397 45358+
6892158b 45359+ if (obj < stack || stackend < obj + len)
45360+ return -1;
45361+
57199397 45362+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45363+ oldframe = __builtin_frame_address(1);
45364+ if (oldframe)
45365+ frame = __builtin_frame_address(2);
45366+ /*
45367+ low ----------------------------------------------> high
45368+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
45369+ ^----------------^
45370+ allow copies only within here
45371+ */
45372+ while (stack <= frame && frame < stackend) {
45373+ /* if obj + len extends past the last frame, this
45374+ check won't pass and the next frame will be 0,
45375+ causing us to bail out and correctly report
45376+ the copy as invalid
57199397 45377+ */
45378+ if (obj + len <= frame)
45379+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
45380+ oldframe = frame;
45381+ frame = *(const void * const *)frame;
57199397 45382+ }
57199397 45383+ return -1;
45384+#else
45385+ return 1;
45386+#endif
45387+}
45388+
4c928ab7 45389+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
58c5fc13 45390+{
ae4e228f 45391+ if (current->signal->curr_ip)
45392+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45393+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
ae4e228f 45394+ else
45395+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45396+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
58c5fc13 45397+ dump_stack();
71d190be 45398+ gr_handle_kernel_exploit();
45399+ do_group_exit(SIGKILL);
45400+}
45401+#endif
45402+
45403+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45404+void pax_track_stack(void)
45405+{
45406+ unsigned long sp = (unsigned long)&sp;
45407+ if (sp < current_thread_info()->lowest_stack &&
45408+ sp > (unsigned long)task_stack_page(current))
45409+ current_thread_info()->lowest_stack = sp;
45410+}
45411+EXPORT_SYMBOL(pax_track_stack);
45412+#endif
45413+
45414+#ifdef CONFIG_PAX_SIZE_OVERFLOW
45415+void report_size_overflow(const char *file, unsigned int line, const char *func)
45416+{
45417+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
45418+ dump_stack();
45419+ do_group_exit(SIGKILL);
45420+}
45421+EXPORT_SYMBOL(report_size_overflow);
45422+#endif
58c5fc13 45423+
df50ba0c 45424 static int zap_process(struct task_struct *start, int exit_code)
45425 {
45426 struct task_struct *t;
5e856224 45427@@ -2017,17 +2364,17 @@ static void wait_for_dump_helpers(struct file *file)
45428 pipe = file->f_path.dentry->d_inode->i_pipe;
45429
45430 pipe_lock(pipe);
45431- pipe->readers++;
45432- pipe->writers--;
45433+ atomic_inc(&pipe->readers);
45434+ atomic_dec(&pipe->writers);
45435
45436- while ((pipe->readers > 1) && (!signal_pending(current))) {
45437+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45438 wake_up_interruptible_sync(&pipe->wait);
45439 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45440 pipe_wait(pipe);
45441 }
45442
45443- pipe->readers--;
45444- pipe->writers++;
45445+ atomic_dec(&pipe->readers);
45446+ atomic_inc(&pipe->writers);
45447 pipe_unlock(pipe);
45448
45449 }
5e856224 45450@@ -2088,7 +2435,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45451 int retval = 0;
45452 int flag = 0;
45453 int ispipe;
45454- static atomic_t core_dump_count = ATOMIC_INIT(0);
45455+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45456 struct coredump_params cprm = {
45457 .signr = signr,
45458 .regs = regs,
5e856224 45459@@ -2103,6 +2450,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45460
45461 audit_core_dumps(signr);
45462
45463+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45464+ gr_handle_brute_attach(current, cprm.mm_flags);
45465+
45466 binfmt = mm->binfmt;
45467 if (!binfmt || !binfmt->core_dump)
45468 goto fail;
5e856224 45469@@ -2170,7 +2520,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45470 }
45471 cprm.limit = RLIM_INFINITY;
45472
45473- dump_count = atomic_inc_return(&core_dump_count);
45474+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
45475 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45476 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45477 task_tgid_vnr(current), current->comm);
5e856224 45478@@ -2197,6 +2547,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45479 } else {
45480 struct inode *inode;
45481
45482+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45483+
45484 if (cprm.limit < binfmt->min_coredump)
45485 goto fail_unlock;
45486
5e856224 45487@@ -2240,7 +2592,7 @@ close_fail:
45488 filp_close(cprm.file, NULL);
45489 fail_dropcount:
45490 if (ispipe)
45491- atomic_dec(&core_dump_count);
45492+ atomic_dec_unchecked(&core_dump_count);
45493 fail_unlock:
45494 kfree(cn.corename);
45495 fail_corename:
5e856224 45496@@ -2259,7 +2611,7 @@ fail:
45497 */
45498 int dump_write(struct file *file, const void *addr, int nr)
45499 {
45500- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45501+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45502 }
45503 EXPORT_SYMBOL(dump_write);
45504
fe2de317 45505diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
4c928ab7 45506index a8cbe1b..fed04cb 100644
45507--- a/fs/ext2/balloc.c
45508+++ b/fs/ext2/balloc.c
45509@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45510
45511 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45512 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45513- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45514+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
45515 sbi->s_resuid != current_fsuid() &&
45516 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45517 return 0;
fe2de317 45518diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
4c928ab7 45519index a203892..4e64db5 100644
45520--- a/fs/ext3/balloc.c
45521+++ b/fs/ext3/balloc.c
4c928ab7 45522@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45523
45524 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45525 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45526- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45527+ if (free_blocks < root_blocks + 1 &&
45528 !use_reservation && sbi->s_resuid != current_fsuid() &&
45529- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45530+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
45531+ !capable_nolog(CAP_SYS_RESOURCE)) {
58c5fc13 45532 return 0;
45533 }
45534 return 1;
fe2de317 45535diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
5e856224 45536index f9e2cd8..bfdc476 100644
45537--- a/fs/ext4/balloc.c
45538+++ b/fs/ext4/balloc.c
5e856224 45539@@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
4c928ab7 45540 /* Hm, nope. Are (enough) root reserved clusters available? */
45541 if (sbi->s_resuid == current_fsuid() ||
45542 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
45543- capable(CAP_SYS_RESOURCE) ||
45544- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45545+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45546+ capable_nolog(CAP_SYS_RESOURCE)) {
15a11c5b 45547
4c928ab7 45548 if (free_clusters >= (nclusters + dirty_clusters))
58c5fc13 45549 return 1;
fe2de317 45550diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
5e856224 45551index 9983ba8..2a5272c 100644
45552--- a/fs/ext4/ext4.h
45553+++ b/fs/ext4/ext4.h
5e856224 45554@@ -1217,19 +1217,19 @@ struct ext4_sb_info {
45555 unsigned long s_mb_last_start;
45556
45557 /* stats for buddy allocator */
45558- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45559- atomic_t s_bal_success; /* we found long enough chunks */
45560- atomic_t s_bal_allocated; /* in blocks */
45561- atomic_t s_bal_ex_scanned; /* total extents scanned */
45562- atomic_t s_bal_goals; /* goal hits */
45563- atomic_t s_bal_breaks; /* too long searches */
45564- atomic_t s_bal_2orders; /* 2^order hits */
45565+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45566+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45567+ atomic_unchecked_t s_bal_allocated; /* in blocks */
45568+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45569+ atomic_unchecked_t s_bal_goals; /* goal hits */
45570+ atomic_unchecked_t s_bal_breaks; /* too long searches */
45571+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45572 spinlock_t s_bal_lock;
45573 unsigned long s_mb_buddies_generated;
45574 unsigned long long s_mb_generation_time;
45575- atomic_t s_mb_lost_chunks;
45576- atomic_t s_mb_preallocated;
45577- atomic_t s_mb_discarded;
45578+ atomic_unchecked_t s_mb_lost_chunks;
45579+ atomic_unchecked_t s_mb_preallocated;
45580+ atomic_unchecked_t s_mb_discarded;
45581 atomic_t s_lock_busy;
45582
45583 /* locality groups */
fe2de317 45584diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
5e856224 45585index cb990b2..4820141 100644
45586--- a/fs/ext4/mballoc.c
45587+++ b/fs/ext4/mballoc.c
4c928ab7 45588@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45589 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45590
45591 if (EXT4_SB(sb)->s_mb_stats)
45592- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45593+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45594
45595 break;
45596 }
4c928ab7 45597@@ -2088,7 +2088,7 @@ repeat:
45598 ac->ac_status = AC_STATUS_CONTINUE;
45599 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45600 cr = 3;
45601- atomic_inc(&sbi->s_mb_lost_chunks);
45602+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45603 goto repeat;
45604 }
45605 }
4c928ab7 45606@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
bc901d79 45607 if (sbi->s_mb_stats) {
45608 ext4_msg(sb, KERN_INFO,
45609 "mballoc: %u blocks %u reqs (%u success)",
45610- atomic_read(&sbi->s_bal_allocated),
45611- atomic_read(&sbi->s_bal_reqs),
45612- atomic_read(&sbi->s_bal_success));
45613+ atomic_read_unchecked(&sbi->s_bal_allocated),
45614+ atomic_read_unchecked(&sbi->s_bal_reqs),
45615+ atomic_read_unchecked(&sbi->s_bal_success));
45616 ext4_msg(sb, KERN_INFO,
45617 "mballoc: %u extents scanned, %u goal hits, "
45618 "%u 2^N hits, %u breaks, %u lost",
45619- atomic_read(&sbi->s_bal_ex_scanned),
45620- atomic_read(&sbi->s_bal_goals),
45621- atomic_read(&sbi->s_bal_2orders),
45622- atomic_read(&sbi->s_bal_breaks),
45623- atomic_read(&sbi->s_mb_lost_chunks));
45624+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45625+ atomic_read_unchecked(&sbi->s_bal_goals),
45626+ atomic_read_unchecked(&sbi->s_bal_2orders),
45627+ atomic_read_unchecked(&sbi->s_bal_breaks),
45628+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45629 ext4_msg(sb, KERN_INFO,
45630 "mballoc: %lu generated and it took %Lu",
45631 sbi->s_mb_buddies_generated,
bc901d79 45632 sbi->s_mb_generation_time);
45633 ext4_msg(sb, KERN_INFO,
45634 "mballoc: %u preallocated, %u discarded",
45635- atomic_read(&sbi->s_mb_preallocated),
45636- atomic_read(&sbi->s_mb_discarded));
45637+ atomic_read_unchecked(&sbi->s_mb_preallocated),
45638+ atomic_read_unchecked(&sbi->s_mb_discarded));
45639 }
45640
45641 free_percpu(sbi->s_locality_groups);
4c928ab7 45642@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45643 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45644
45645 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45646- atomic_inc(&sbi->s_bal_reqs);
45647- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45648+ atomic_inc_unchecked(&sbi->s_bal_reqs);
45649+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45650 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45651- atomic_inc(&sbi->s_bal_success);
45652- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45653+ atomic_inc_unchecked(&sbi->s_bal_success);
45654+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45655 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45656 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45657- atomic_inc(&sbi->s_bal_goals);
45658+ atomic_inc_unchecked(&sbi->s_bal_goals);
45659 if (ac->ac_found > sbi->s_mb_max_to_scan)
45660- atomic_inc(&sbi->s_bal_breaks);
45661+ atomic_inc_unchecked(&sbi->s_bal_breaks);
45662 }
45663
45664 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4c928ab7 45665@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45666 trace_ext4_mb_new_inode_pa(ac, pa);
45667
45668 ext4_mb_use_inode_pa(ac, pa);
45669- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45670+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45671
45672 ei = EXT4_I(ac->ac_inode);
45673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4c928ab7 45674@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45675 trace_ext4_mb_new_group_pa(ac, pa);
45676
45677 ext4_mb_use_group_pa(ac, pa);
45678- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45679+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45680
45681 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45682 lg = ac->ac_lg;
4c928ab7 45683@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45684 * from the bitmap and continue.
45685 */
45686 }
45687- atomic_add(free, &sbi->s_mb_discarded);
45688+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
45689
45690 return err;
45691 }
4c928ab7 45692@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45693 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45694 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45695 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45696- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45697+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45698 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45699
45700 return 0;
45701diff --git a/fs/fcntl.c b/fs/fcntl.c
45702index 22764c7..86372c9 100644
45703--- a/fs/fcntl.c
45704+++ b/fs/fcntl.c
45705@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45706 if (err)
45707 return err;
45708
45709+ if (gr_handle_chroot_fowner(pid, type))
45710+ return -ENOENT;
45711+ if (gr_check_protected_task_fowner(pid, type))
45712+ return -EACCES;
45713+
45714 f_modown(filp, pid, type, force);
45715 return 0;
45716 }
45717@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45718
45719 static int f_setown_ex(struct file *filp, unsigned long arg)
45720 {
45721- struct f_owner_ex * __user owner_p = (void * __user)arg;
45722+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45723 struct f_owner_ex owner;
45724 struct pid *pid;
45725 int type;
fe2de317 45726@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45727
45728 static int f_getown_ex(struct file *filp, unsigned long arg)
45729 {
45730- struct f_owner_ex * __user owner_p = (void * __user)arg;
45731+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45732 struct f_owner_ex owner;
45733 int ret = 0;
45734
fe2de317 45735@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45736 switch (cmd) {
45737 case F_DUPFD:
45738 case F_DUPFD_CLOEXEC:
45739+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
df50ba0c 45740 if (arg >= rlimit(RLIMIT_NOFILE))
45741 break;
45742 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45743diff --git a/fs/fifo.c b/fs/fifo.c
45744index b1a524d..4ee270e 100644
45745--- a/fs/fifo.c
45746+++ b/fs/fifo.c
45747@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45748 */
45749 filp->f_op = &read_pipefifo_fops;
45750 pipe->r_counter++;
45751- if (pipe->readers++ == 0)
45752+ if (atomic_inc_return(&pipe->readers) == 1)
45753 wake_up_partner(inode);
45754
45755- if (!pipe->writers) {
45756+ if (!atomic_read(&pipe->writers)) {
45757 if ((filp->f_flags & O_NONBLOCK)) {
45758 /* suppress POLLHUP until we have
45759 * seen a writer */
fe2de317 45760@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45761 * errno=ENXIO when there is no process reading the FIFO.
45762 */
45763 ret = -ENXIO;
45764- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45765+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45766 goto err;
45767
45768 filp->f_op = &write_pipefifo_fops;
45769 pipe->w_counter++;
45770- if (!pipe->writers++)
45771+ if (atomic_inc_return(&pipe->writers) == 1)
45772 wake_up_partner(inode);
45773
45774- if (!pipe->readers) {
45775+ if (!atomic_read(&pipe->readers)) {
45776 wait_for_partner(inode, &pipe->r_counter);
45777 if (signal_pending(current))
45778 goto err_wr;
fe2de317 45779@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45780 */
45781 filp->f_op = &rdwr_pipefifo_fops;
45782
45783- pipe->readers++;
45784- pipe->writers++;
45785+ atomic_inc(&pipe->readers);
45786+ atomic_inc(&pipe->writers);
45787 pipe->r_counter++;
45788 pipe->w_counter++;
45789- if (pipe->readers == 1 || pipe->writers == 1)
45790+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45791 wake_up_partner(inode);
45792 break;
45793
fe2de317 45794@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45795 return 0;
45796
45797 err_rd:
45798- if (!--pipe->readers)
45799+ if (atomic_dec_and_test(&pipe->readers))
45800 wake_up_interruptible(&pipe->wait);
45801 ret = -ERESTARTSYS;
45802 goto err;
45803
45804 err_wr:
45805- if (!--pipe->writers)
45806+ if (atomic_dec_and_test(&pipe->writers))
45807 wake_up_interruptible(&pipe->wait);
45808 ret = -ERESTARTSYS;
45809 goto err;
45810
45811 err:
45812- if (!pipe->readers && !pipe->writers)
45813+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45814 free_pipe_info(inode);
45815
45816 err_nocleanup:
45817diff --git a/fs/file.c b/fs/file.c
45818index 4c6992d..104cdea 100644
45819--- a/fs/file.c
45820+++ b/fs/file.c
66a7e928 45821@@ -15,6 +15,7 @@
45822 #include <linux/slab.h>
45823 #include <linux/vmalloc.h>
45824 #include <linux/file.h>
45825+#include <linux/security.h>
45826 #include <linux/fdtable.h>
45827 #include <linux/bitops.h>
45828 #include <linux/interrupt.h>
fe2de317 45829@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
45830 * N.B. For clone tasks sharing a files structure, this test
45831 * will limit the total number of files that can be opened.
45832 */
58c5fc13 45833+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
df50ba0c 45834 if (nr >= rlimit(RLIMIT_NOFILE))
45835 return -EMFILE;
45836
fe2de317 45837diff --git a/fs/filesystems.c b/fs/filesystems.c
5e856224 45838index 96f2428..f5eeb8e 100644
45839--- a/fs/filesystems.c
45840+++ b/fs/filesystems.c
5e856224 45841@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45842 int len = dot ? dot - name : strlen(name);
45843
45844 fs = __get_fs_type(name, len);
45845+
45846+#ifdef CONFIG_GRKERNSEC_MODHARDEN
45847+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45848+#else
45849 if (!fs && (request_module("%.*s", len, name) == 0))
45850+#endif
45851 fs = __get_fs_type(name, len);
45852
45853 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
fe2de317 45854diff --git a/fs/fs_struct.c b/fs/fs_struct.c
4c928ab7 45855index 78b519c..a8b4979 100644
45856--- a/fs/fs_struct.c
45857+++ b/fs/fs_struct.c
45858@@ -4,6 +4,7 @@
45859 #include <linux/path.h>
45860 #include <linux/slab.h>
45861 #include <linux/fs_struct.h>
45862+#include <linux/grsecurity.h>
45863 #include "internal.h"
45864
45865 static inline void path_get_longterm(struct path *path)
45866@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45867 old_root = fs->root;
45868 fs->root = *path;
45869 path_get_longterm(path);
45870+ gr_set_chroot_entries(current, path);
45871 write_seqcount_end(&fs->seq);
45872 spin_unlock(&fs->lock);
45873 if (old_root.dentry)
45874@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45875 && fs->root.mnt == old_root->mnt) {
45876 path_get_longterm(new_root);
45877 fs->root = *new_root;
45878+ gr_set_chroot_entries(p, new_root);
45879 count++;
45880 }
45881 if (fs->pwd.dentry == old_root->dentry
45882@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45883 spin_lock(&fs->lock);
45884 write_seqcount_begin(&fs->seq);
45885 tsk->fs = NULL;
45886- kill = !--fs->users;
45887+ gr_clear_chroot_entries(tsk);
45888+ kill = !atomic_dec_return(&fs->users);
45889 write_seqcount_end(&fs->seq);
45890 spin_unlock(&fs->lock);
45891 task_unlock(tsk);
45892@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45893 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45894 /* We don't need to lock fs - think why ;-) */
45895 if (fs) {
45896- fs->users = 1;
45897+ atomic_set(&fs->users, 1);
45898 fs->in_exec = 0;
45899 spin_lock_init(&fs->lock);
45900 seqcount_init(&fs->seq);
45901@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45902 spin_lock(&old->lock);
45903 fs->root = old->root;
45904 path_get_longterm(&fs->root);
45905+ /* instead of calling gr_set_chroot_entries here,
45906+ we call it from every caller of this function
45907+ */
45908 fs->pwd = old->pwd;
45909 path_get_longterm(&fs->pwd);
45910 spin_unlock(&old->lock);
45911@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45912
45913 task_lock(current);
45914 spin_lock(&fs->lock);
45915- kill = !--fs->users;
45916+ kill = !atomic_dec_return(&fs->users);
45917 current->fs = new_fs;
45918+ gr_set_chroot_entries(current, &new_fs->root);
45919 spin_unlock(&fs->lock);
45920 task_unlock(current);
45921
45922@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45923
45924 int current_umask(void)
45925 {
45926- return current->fs->umask;
45927+ return current->fs->umask | gr_acl_umask();
45928 }
45929 EXPORT_SYMBOL(current_umask);
45930
45931 /* to be mentioned only in INIT_TASK */
45932 struct fs_struct init_fs = {
45933- .users = 1,
45934+ .users = ATOMIC_INIT(1),
45935 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45936 .seq = SEQCNT_ZERO,
45937 .umask = 0022,
45938@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45939 task_lock(current);
45940
45941 spin_lock(&init_fs.lock);
45942- init_fs.users++;
45943+ atomic_inc(&init_fs.users);
45944 spin_unlock(&init_fs.lock);
45945
45946 spin_lock(&fs->lock);
45947 current->fs = &init_fs;
45948- kill = !--fs->users;
45949+ gr_set_chroot_entries(current, &current->fs->root);
45950+ kill = !atomic_dec_return(&fs->users);
45951 spin_unlock(&fs->lock);
45952
45953 task_unlock(current);
45954diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45955index 9905350..02eaec4 100644
45956--- a/fs/fscache/cookie.c
45957+++ b/fs/fscache/cookie.c
45958@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45959 parent ? (char *) parent->def->name : "<no-parent>",
45960 def->name, netfs_data);
45961
45962- fscache_stat(&fscache_n_acquires);
45963+ fscache_stat_unchecked(&fscache_n_acquires);
45964
45965 /* if there's no parent cookie, then we don't create one here either */
45966 if (!parent) {
45967- fscache_stat(&fscache_n_acquires_null);
45968+ fscache_stat_unchecked(&fscache_n_acquires_null);
45969 _leave(" [no parent]");
45970 return NULL;
45971 }
fe2de317 45972@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45973 /* allocate and initialise a cookie */
45974 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45975 if (!cookie) {
45976- fscache_stat(&fscache_n_acquires_oom);
45977+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45978 _leave(" [ENOMEM]");
45979 return NULL;
45980 }
fe2de317 45981@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45982
45983 switch (cookie->def->type) {
45984 case FSCACHE_COOKIE_TYPE_INDEX:
45985- fscache_stat(&fscache_n_cookie_index);
45986+ fscache_stat_unchecked(&fscache_n_cookie_index);
45987 break;
45988 case FSCACHE_COOKIE_TYPE_DATAFILE:
45989- fscache_stat(&fscache_n_cookie_data);
45990+ fscache_stat_unchecked(&fscache_n_cookie_data);
45991 break;
45992 default:
45993- fscache_stat(&fscache_n_cookie_special);
45994+ fscache_stat_unchecked(&fscache_n_cookie_special);
45995 break;
45996 }
45997
fe2de317 45998@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45999 if (fscache_acquire_non_index_cookie(cookie) < 0) {
46000 atomic_dec(&parent->n_children);
46001 __fscache_cookie_put(cookie);
46002- fscache_stat(&fscache_n_acquires_nobufs);
46003+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
46004 _leave(" = NULL");
46005 return NULL;
46006 }
46007 }
46008
46009- fscache_stat(&fscache_n_acquires_ok);
46010+ fscache_stat_unchecked(&fscache_n_acquires_ok);
46011 _leave(" = %p", cookie);
46012 return cookie;
46013 }
fe2de317 46014@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
46015 cache = fscache_select_cache_for_object(cookie->parent);
46016 if (!cache) {
46017 up_read(&fscache_addremove_sem);
46018- fscache_stat(&fscache_n_acquires_no_cache);
46019+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
46020 _leave(" = -ENOMEDIUM [no cache]");
46021 return -ENOMEDIUM;
46022 }
46023@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
46024 object = cache->ops->alloc_object(cache, cookie);
46025 fscache_stat_d(&fscache_n_cop_alloc_object);
46026 if (IS_ERR(object)) {
46027- fscache_stat(&fscache_n_object_no_alloc);
46028+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
46029 ret = PTR_ERR(object);
46030 goto error;
46031 }
46032
46033- fscache_stat(&fscache_n_object_alloc);
46034+ fscache_stat_unchecked(&fscache_n_object_alloc);
46035
46036 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
46037
46038@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
46039 struct fscache_object *object;
46040 struct hlist_node *_p;
46041
46042- fscache_stat(&fscache_n_updates);
46043+ fscache_stat_unchecked(&fscache_n_updates);
46044
46045 if (!cookie) {
46046- fscache_stat(&fscache_n_updates_null);
46047+ fscache_stat_unchecked(&fscache_n_updates_null);
46048 _leave(" [no cookie]");
46049 return;
46050 }
46051@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46052 struct fscache_object *object;
46053 unsigned long event;
46054
46055- fscache_stat(&fscache_n_relinquishes);
46056+ fscache_stat_unchecked(&fscache_n_relinquishes);
46057 if (retire)
46058- fscache_stat(&fscache_n_relinquishes_retire);
46059+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
46060
46061 if (!cookie) {
46062- fscache_stat(&fscache_n_relinquishes_null);
46063+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
46064 _leave(" [no cookie]");
46065 return;
46066 }
46067@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46068
46069 /* wait for the cookie to finish being instantiated (or to fail) */
46070 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
46071- fscache_stat(&fscache_n_relinquishes_waitcrt);
46072+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
46073 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
46074 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
46075 }
46076diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
46077index f6aad48..88dcf26 100644
46078--- a/fs/fscache/internal.h
46079+++ b/fs/fscache/internal.h
46080@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
46081 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
46082 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
46083
46084-extern atomic_t fscache_n_op_pend;
46085-extern atomic_t fscache_n_op_run;
46086-extern atomic_t fscache_n_op_enqueue;
46087-extern atomic_t fscache_n_op_deferred_release;
46088-extern atomic_t fscache_n_op_release;
46089-extern atomic_t fscache_n_op_gc;
46090-extern atomic_t fscache_n_op_cancelled;
46091-extern atomic_t fscache_n_op_rejected;
46092+extern atomic_unchecked_t fscache_n_op_pend;
46093+extern atomic_unchecked_t fscache_n_op_run;
46094+extern atomic_unchecked_t fscache_n_op_enqueue;
46095+extern atomic_unchecked_t fscache_n_op_deferred_release;
46096+extern atomic_unchecked_t fscache_n_op_release;
46097+extern atomic_unchecked_t fscache_n_op_gc;
46098+extern atomic_unchecked_t fscache_n_op_cancelled;
46099+extern atomic_unchecked_t fscache_n_op_rejected;
46100
46101-extern atomic_t fscache_n_attr_changed;
46102-extern atomic_t fscache_n_attr_changed_ok;
46103-extern atomic_t fscache_n_attr_changed_nobufs;
46104-extern atomic_t fscache_n_attr_changed_nomem;
46105-extern atomic_t fscache_n_attr_changed_calls;
46106+extern atomic_unchecked_t fscache_n_attr_changed;
46107+extern atomic_unchecked_t fscache_n_attr_changed_ok;
46108+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
46109+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
46110+extern atomic_unchecked_t fscache_n_attr_changed_calls;
46111
46112-extern atomic_t fscache_n_allocs;
46113-extern atomic_t fscache_n_allocs_ok;
46114-extern atomic_t fscache_n_allocs_wait;
46115-extern atomic_t fscache_n_allocs_nobufs;
46116-extern atomic_t fscache_n_allocs_intr;
46117-extern atomic_t fscache_n_allocs_object_dead;
46118-extern atomic_t fscache_n_alloc_ops;
46119-extern atomic_t fscache_n_alloc_op_waits;
46120+extern atomic_unchecked_t fscache_n_allocs;
46121+extern atomic_unchecked_t fscache_n_allocs_ok;
46122+extern atomic_unchecked_t fscache_n_allocs_wait;
46123+extern atomic_unchecked_t fscache_n_allocs_nobufs;
46124+extern atomic_unchecked_t fscache_n_allocs_intr;
46125+extern atomic_unchecked_t fscache_n_allocs_object_dead;
46126+extern atomic_unchecked_t fscache_n_alloc_ops;
46127+extern atomic_unchecked_t fscache_n_alloc_op_waits;
46128
46129-extern atomic_t fscache_n_retrievals;
46130-extern atomic_t fscache_n_retrievals_ok;
46131-extern atomic_t fscache_n_retrievals_wait;
46132-extern atomic_t fscache_n_retrievals_nodata;
46133-extern atomic_t fscache_n_retrievals_nobufs;
46134-extern atomic_t fscache_n_retrievals_intr;
46135-extern atomic_t fscache_n_retrievals_nomem;
46136-extern atomic_t fscache_n_retrievals_object_dead;
46137-extern atomic_t fscache_n_retrieval_ops;
46138-extern atomic_t fscache_n_retrieval_op_waits;
46139+extern atomic_unchecked_t fscache_n_retrievals;
46140+extern atomic_unchecked_t fscache_n_retrievals_ok;
46141+extern atomic_unchecked_t fscache_n_retrievals_wait;
46142+extern atomic_unchecked_t fscache_n_retrievals_nodata;
46143+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
46144+extern atomic_unchecked_t fscache_n_retrievals_intr;
46145+extern atomic_unchecked_t fscache_n_retrievals_nomem;
46146+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
46147+extern atomic_unchecked_t fscache_n_retrieval_ops;
46148+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
46149
46150-extern atomic_t fscache_n_stores;
46151-extern atomic_t fscache_n_stores_ok;
46152-extern atomic_t fscache_n_stores_again;
46153-extern atomic_t fscache_n_stores_nobufs;
46154-extern atomic_t fscache_n_stores_oom;
46155-extern atomic_t fscache_n_store_ops;
46156-extern atomic_t fscache_n_store_calls;
46157-extern atomic_t fscache_n_store_pages;
46158-extern atomic_t fscache_n_store_radix_deletes;
46159-extern atomic_t fscache_n_store_pages_over_limit;
46160+extern atomic_unchecked_t fscache_n_stores;
46161+extern atomic_unchecked_t fscache_n_stores_ok;
46162+extern atomic_unchecked_t fscache_n_stores_again;
46163+extern atomic_unchecked_t fscache_n_stores_nobufs;
46164+extern atomic_unchecked_t fscache_n_stores_oom;
46165+extern atomic_unchecked_t fscache_n_store_ops;
46166+extern atomic_unchecked_t fscache_n_store_calls;
46167+extern atomic_unchecked_t fscache_n_store_pages;
46168+extern atomic_unchecked_t fscache_n_store_radix_deletes;
46169+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
46170
46171-extern atomic_t fscache_n_store_vmscan_not_storing;
46172-extern atomic_t fscache_n_store_vmscan_gone;
46173-extern atomic_t fscache_n_store_vmscan_busy;
46174-extern atomic_t fscache_n_store_vmscan_cancelled;
46175+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46176+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
46177+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
46178+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46179
46180-extern atomic_t fscache_n_marks;
46181-extern atomic_t fscache_n_uncaches;
46182+extern atomic_unchecked_t fscache_n_marks;
46183+extern atomic_unchecked_t fscache_n_uncaches;
46184
46185-extern atomic_t fscache_n_acquires;
46186-extern atomic_t fscache_n_acquires_null;
46187-extern atomic_t fscache_n_acquires_no_cache;
46188-extern atomic_t fscache_n_acquires_ok;
46189-extern atomic_t fscache_n_acquires_nobufs;
46190-extern atomic_t fscache_n_acquires_oom;
46191+extern atomic_unchecked_t fscache_n_acquires;
46192+extern atomic_unchecked_t fscache_n_acquires_null;
46193+extern atomic_unchecked_t fscache_n_acquires_no_cache;
46194+extern atomic_unchecked_t fscache_n_acquires_ok;
46195+extern atomic_unchecked_t fscache_n_acquires_nobufs;
46196+extern atomic_unchecked_t fscache_n_acquires_oom;
46197
46198-extern atomic_t fscache_n_updates;
46199-extern atomic_t fscache_n_updates_null;
46200-extern atomic_t fscache_n_updates_run;
46201+extern atomic_unchecked_t fscache_n_updates;
46202+extern atomic_unchecked_t fscache_n_updates_null;
46203+extern atomic_unchecked_t fscache_n_updates_run;
46204
46205-extern atomic_t fscache_n_relinquishes;
46206-extern atomic_t fscache_n_relinquishes_null;
46207-extern atomic_t fscache_n_relinquishes_waitcrt;
46208-extern atomic_t fscache_n_relinquishes_retire;
46209+extern atomic_unchecked_t fscache_n_relinquishes;
46210+extern atomic_unchecked_t fscache_n_relinquishes_null;
46211+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46212+extern atomic_unchecked_t fscache_n_relinquishes_retire;
46213
46214-extern atomic_t fscache_n_cookie_index;
46215-extern atomic_t fscache_n_cookie_data;
46216-extern atomic_t fscache_n_cookie_special;
46217+extern atomic_unchecked_t fscache_n_cookie_index;
46218+extern atomic_unchecked_t fscache_n_cookie_data;
46219+extern atomic_unchecked_t fscache_n_cookie_special;
46220
46221-extern atomic_t fscache_n_object_alloc;
46222-extern atomic_t fscache_n_object_no_alloc;
46223-extern atomic_t fscache_n_object_lookups;
46224-extern atomic_t fscache_n_object_lookups_negative;
46225-extern atomic_t fscache_n_object_lookups_positive;
46226-extern atomic_t fscache_n_object_lookups_timed_out;
46227-extern atomic_t fscache_n_object_created;
46228-extern atomic_t fscache_n_object_avail;
46229-extern atomic_t fscache_n_object_dead;
46230+extern atomic_unchecked_t fscache_n_object_alloc;
46231+extern atomic_unchecked_t fscache_n_object_no_alloc;
46232+extern atomic_unchecked_t fscache_n_object_lookups;
46233+extern atomic_unchecked_t fscache_n_object_lookups_negative;
46234+extern atomic_unchecked_t fscache_n_object_lookups_positive;
46235+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
46236+extern atomic_unchecked_t fscache_n_object_created;
46237+extern atomic_unchecked_t fscache_n_object_avail;
46238+extern atomic_unchecked_t fscache_n_object_dead;
46239
46240-extern atomic_t fscache_n_checkaux_none;
46241-extern atomic_t fscache_n_checkaux_okay;
46242-extern atomic_t fscache_n_checkaux_update;
46243-extern atomic_t fscache_n_checkaux_obsolete;
46244+extern atomic_unchecked_t fscache_n_checkaux_none;
46245+extern atomic_unchecked_t fscache_n_checkaux_okay;
46246+extern atomic_unchecked_t fscache_n_checkaux_update;
46247+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
46248
46249 extern atomic_t fscache_n_cop_alloc_object;
46250 extern atomic_t fscache_n_cop_lookup_object;
46251@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
46252 atomic_inc(stat);
46253 }
46254
46255+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
46256+{
46257+ atomic_inc_unchecked(stat);
46258+}
46259+
46260 static inline void fscache_stat_d(atomic_t *stat)
46261 {
46262 atomic_dec(stat);
46263@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
46264
46265 #define __fscache_stat(stat) (NULL)
46266 #define fscache_stat(stat) do {} while (0)
46267+#define fscache_stat_unchecked(stat) do {} while (0)
46268 #define fscache_stat_d(stat) do {} while (0)
46269 #endif
46270
46271diff --git a/fs/fscache/object.c b/fs/fscache/object.c
46272index b6b897c..0ffff9c 100644
46273--- a/fs/fscache/object.c
46274+++ b/fs/fscache/object.c
46275@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46276 /* update the object metadata on disk */
46277 case FSCACHE_OBJECT_UPDATING:
46278 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
46279- fscache_stat(&fscache_n_updates_run);
46280+ fscache_stat_unchecked(&fscache_n_updates_run);
46281 fscache_stat(&fscache_n_cop_update_object);
46282 object->cache->ops->update_object(object);
46283 fscache_stat_d(&fscache_n_cop_update_object);
46284@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46285 spin_lock(&object->lock);
46286 object->state = FSCACHE_OBJECT_DEAD;
46287 spin_unlock(&object->lock);
46288- fscache_stat(&fscache_n_object_dead);
46289+ fscache_stat_unchecked(&fscache_n_object_dead);
46290 goto terminal_transit;
46291
46292 /* handle the parent cache of this object being withdrawn from
46293@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46294 spin_lock(&object->lock);
46295 object->state = FSCACHE_OBJECT_DEAD;
46296 spin_unlock(&object->lock);
46297- fscache_stat(&fscache_n_object_dead);
46298+ fscache_stat_unchecked(&fscache_n_object_dead);
46299 goto terminal_transit;
46300
46301 /* complain about the object being woken up once it is
46302@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46303 parent->cookie->def->name, cookie->def->name,
46304 object->cache->tag->name);
46305
46306- fscache_stat(&fscache_n_object_lookups);
46307+ fscache_stat_unchecked(&fscache_n_object_lookups);
46308 fscache_stat(&fscache_n_cop_lookup_object);
46309 ret = object->cache->ops->lookup_object(object);
46310 fscache_stat_d(&fscache_n_cop_lookup_object);
46311@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46312 if (ret == -ETIMEDOUT) {
46313 /* probably stuck behind another object, so move this one to
46314 * the back of the queue */
46315- fscache_stat(&fscache_n_object_lookups_timed_out);
46316+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
46317 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46318 }
46319
46320@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
46321
46322 spin_lock(&object->lock);
46323 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46324- fscache_stat(&fscache_n_object_lookups_negative);
46325+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
46326
46327 /* transit here to allow write requests to begin stacking up
46328 * and read requests to begin returning ENODATA */
46329@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
46330 * result, in which case there may be data available */
46331 spin_lock(&object->lock);
46332 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46333- fscache_stat(&fscache_n_object_lookups_positive);
46334+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
46335
46336 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
46337
46338@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
46339 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46340 } else {
46341 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
46342- fscache_stat(&fscache_n_object_created);
46343+ fscache_stat_unchecked(&fscache_n_object_created);
46344
46345 object->state = FSCACHE_OBJECT_AVAILABLE;
46346 spin_unlock(&object->lock);
46347@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
46348 fscache_enqueue_dependents(object);
46349
46350 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
46351- fscache_stat(&fscache_n_object_avail);
46352+ fscache_stat_unchecked(&fscache_n_object_avail);
46353
46354 _leave("");
46355 }
46356@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46357 enum fscache_checkaux result;
46358
46359 if (!object->cookie->def->check_aux) {
46360- fscache_stat(&fscache_n_checkaux_none);
46361+ fscache_stat_unchecked(&fscache_n_checkaux_none);
46362 return FSCACHE_CHECKAUX_OKAY;
46363 }
46364
46365@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46366 switch (result) {
46367 /* entry okay as is */
46368 case FSCACHE_CHECKAUX_OKAY:
46369- fscache_stat(&fscache_n_checkaux_okay);
46370+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
46371 break;
46372
46373 /* entry requires update */
46374 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
46375- fscache_stat(&fscache_n_checkaux_update);
46376+ fscache_stat_unchecked(&fscache_n_checkaux_update);
46377 break;
46378
46379 /* entry requires deletion */
46380 case FSCACHE_CHECKAUX_OBSOLETE:
46381- fscache_stat(&fscache_n_checkaux_obsolete);
46382+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
46383 break;
46384
46385 default:
46386diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
46387index 30afdfa..2256596 100644
46388--- a/fs/fscache/operation.c
46389+++ b/fs/fscache/operation.c
46390@@ -17,7 +17,7 @@
46391 #include <linux/slab.h>
46392 #include "internal.h"
46393
46394-atomic_t fscache_op_debug_id;
46395+atomic_unchecked_t fscache_op_debug_id;
46396 EXPORT_SYMBOL(fscache_op_debug_id);
46397
46398 /**
46399@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46400 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46401 ASSERTCMP(atomic_read(&op->usage), >, 0);
46402
46403- fscache_stat(&fscache_n_op_enqueue);
46404+ fscache_stat_unchecked(&fscache_n_op_enqueue);
46405 switch (op->flags & FSCACHE_OP_TYPE) {
46406 case FSCACHE_OP_ASYNC:
46407 _debug("queue async");
46408@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46409 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46410 if (op->processor)
46411 fscache_enqueue_operation(op);
46412- fscache_stat(&fscache_n_op_run);
46413+ fscache_stat_unchecked(&fscache_n_op_run);
46414 }
46415
46416 /*
46417@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46418 if (object->n_ops > 1) {
46419 atomic_inc(&op->usage);
46420 list_add_tail(&op->pend_link, &object->pending_ops);
46421- fscache_stat(&fscache_n_op_pend);
46422+ fscache_stat_unchecked(&fscache_n_op_pend);
46423 } else if (!list_empty(&object->pending_ops)) {
46424 atomic_inc(&op->usage);
46425 list_add_tail(&op->pend_link, &object->pending_ops);
46426- fscache_stat(&fscache_n_op_pend);
46427+ fscache_stat_unchecked(&fscache_n_op_pend);
46428 fscache_start_operations(object);
46429 } else {
46430 ASSERTCMP(object->n_in_progress, ==, 0);
46431@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46432 object->n_exclusive++; /* reads and writes must wait */
46433 atomic_inc(&op->usage);
46434 list_add_tail(&op->pend_link, &object->pending_ops);
46435- fscache_stat(&fscache_n_op_pend);
46436+ fscache_stat_unchecked(&fscache_n_op_pend);
46437 ret = 0;
46438 } else {
46439 /* not allowed to submit ops in any other state */
46440@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46441 if (object->n_exclusive > 0) {
46442 atomic_inc(&op->usage);
46443 list_add_tail(&op->pend_link, &object->pending_ops);
46444- fscache_stat(&fscache_n_op_pend);
46445+ fscache_stat_unchecked(&fscache_n_op_pend);
46446 } else if (!list_empty(&object->pending_ops)) {
46447 atomic_inc(&op->usage);
46448 list_add_tail(&op->pend_link, &object->pending_ops);
46449- fscache_stat(&fscache_n_op_pend);
46450+ fscache_stat_unchecked(&fscache_n_op_pend);
46451 fscache_start_operations(object);
46452 } else {
46453 ASSERTCMP(object->n_exclusive, ==, 0);
46454@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46455 object->n_ops++;
46456 atomic_inc(&op->usage);
46457 list_add_tail(&op->pend_link, &object->pending_ops);
46458- fscache_stat(&fscache_n_op_pend);
46459+ fscache_stat_unchecked(&fscache_n_op_pend);
46460 ret = 0;
46461 } else if (object->state == FSCACHE_OBJECT_DYING ||
46462 object->state == FSCACHE_OBJECT_LC_DYING ||
46463 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46464- fscache_stat(&fscache_n_op_rejected);
46465+ fscache_stat_unchecked(&fscache_n_op_rejected);
46466 ret = -ENOBUFS;
46467 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46468 fscache_report_unexpected_submission(object, op, ostate);
46469@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46470
46471 ret = -EBUSY;
46472 if (!list_empty(&op->pend_link)) {
46473- fscache_stat(&fscache_n_op_cancelled);
46474+ fscache_stat_unchecked(&fscache_n_op_cancelled);
46475 list_del_init(&op->pend_link);
46476 object->n_ops--;
46477 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46478@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46479 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46480 BUG();
46481
46482- fscache_stat(&fscache_n_op_release);
46483+ fscache_stat_unchecked(&fscache_n_op_release);
46484
46485 if (op->release) {
46486 op->release(op);
46487@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46488 * lock, and defer it otherwise */
46489 if (!spin_trylock(&object->lock)) {
46490 _debug("defer put");
46491- fscache_stat(&fscache_n_op_deferred_release);
46492+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
46493
46494 cache = object->cache;
46495 spin_lock(&cache->op_gc_list_lock);
46496@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46497
46498 _debug("GC DEFERRED REL OBJ%x OP%x",
46499 object->debug_id, op->debug_id);
46500- fscache_stat(&fscache_n_op_gc);
46501+ fscache_stat_unchecked(&fscache_n_op_gc);
46502
46503 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46504
46505diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46506index 3f7a59b..cf196cc 100644
46507--- a/fs/fscache/page.c
46508+++ b/fs/fscache/page.c
46509@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46510 val = radix_tree_lookup(&cookie->stores, page->index);
46511 if (!val) {
46512 rcu_read_unlock();
46513- fscache_stat(&fscache_n_store_vmscan_not_storing);
46514+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46515 __fscache_uncache_page(cookie, page);
46516 return true;
46517 }
46518@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46519 spin_unlock(&cookie->stores_lock);
46520
46521 if (xpage) {
46522- fscache_stat(&fscache_n_store_vmscan_cancelled);
46523- fscache_stat(&fscache_n_store_radix_deletes);
46524+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46525+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46526 ASSERTCMP(xpage, ==, page);
46527 } else {
46528- fscache_stat(&fscache_n_store_vmscan_gone);
46529+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46530 }
46531
46532 wake_up_bit(&cookie->flags, 0);
46533@@ -107,7 +107,7 @@ page_busy:
46534 /* we might want to wait here, but that could deadlock the allocator as
46535 * the work threads writing to the cache may all end up sleeping
46536 * on memory allocation */
46537- fscache_stat(&fscache_n_store_vmscan_busy);
46538+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46539 return false;
46540 }
46541 EXPORT_SYMBOL(__fscache_maybe_release_page);
46542@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46543 FSCACHE_COOKIE_STORING_TAG);
46544 if (!radix_tree_tag_get(&cookie->stores, page->index,
46545 FSCACHE_COOKIE_PENDING_TAG)) {
46546- fscache_stat(&fscache_n_store_radix_deletes);
46547+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46548 xpage = radix_tree_delete(&cookie->stores, page->index);
46549 }
46550 spin_unlock(&cookie->stores_lock);
46551@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46552
46553 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46554
46555- fscache_stat(&fscache_n_attr_changed_calls);
46556+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46557
46558 if (fscache_object_is_active(object)) {
46559 fscache_stat(&fscache_n_cop_attr_changed);
46560@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46561
46562 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46563
46564- fscache_stat(&fscache_n_attr_changed);
46565+ fscache_stat_unchecked(&fscache_n_attr_changed);
46566
46567 op = kzalloc(sizeof(*op), GFP_KERNEL);
46568 if (!op) {
46569- fscache_stat(&fscache_n_attr_changed_nomem);
46570+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46571 _leave(" = -ENOMEM");
46572 return -ENOMEM;
46573 }
46574@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46575 if (fscache_submit_exclusive_op(object, op) < 0)
46576 goto nobufs;
46577 spin_unlock(&cookie->lock);
46578- fscache_stat(&fscache_n_attr_changed_ok);
46579+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46580 fscache_put_operation(op);
46581 _leave(" = 0");
46582 return 0;
46583@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46584 nobufs:
46585 spin_unlock(&cookie->lock);
46586 kfree(op);
46587- fscache_stat(&fscache_n_attr_changed_nobufs);
46588+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46589 _leave(" = %d", -ENOBUFS);
46590 return -ENOBUFS;
46591 }
46592@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46593 /* allocate a retrieval operation and attempt to submit it */
46594 op = kzalloc(sizeof(*op), GFP_NOIO);
46595 if (!op) {
46596- fscache_stat(&fscache_n_retrievals_nomem);
46597+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46598 return NULL;
46599 }
46600
46601@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46602 return 0;
46603 }
46604
46605- fscache_stat(&fscache_n_retrievals_wait);
46606+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
46607
46608 jif = jiffies;
46609 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46610 fscache_wait_bit_interruptible,
46611 TASK_INTERRUPTIBLE) != 0) {
46612- fscache_stat(&fscache_n_retrievals_intr);
46613+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46614 _leave(" = -ERESTARTSYS");
46615 return -ERESTARTSYS;
46616 }
46617@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46618 */
46619 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46620 struct fscache_retrieval *op,
46621- atomic_t *stat_op_waits,
46622- atomic_t *stat_object_dead)
46623+ atomic_unchecked_t *stat_op_waits,
46624+ atomic_unchecked_t *stat_object_dead)
46625 {
46626 int ret;
46627
46628@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46629 goto check_if_dead;
46630
46631 _debug(">>> WT");
46632- fscache_stat(stat_op_waits);
46633+ fscache_stat_unchecked(stat_op_waits);
46634 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46635 fscache_wait_bit_interruptible,
46636 TASK_INTERRUPTIBLE) < 0) {
46637@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46638
46639 check_if_dead:
46640 if (unlikely(fscache_object_is_dead(object))) {
46641- fscache_stat(stat_object_dead);
46642+ fscache_stat_unchecked(stat_object_dead);
46643 return -ENOBUFS;
46644 }
46645 return 0;
46646@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46647
46648 _enter("%p,%p,,,", cookie, page);
46649
46650- fscache_stat(&fscache_n_retrievals);
46651+ fscache_stat_unchecked(&fscache_n_retrievals);
46652
46653 if (hlist_empty(&cookie->backing_objects))
46654 goto nobufs;
46655@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46656 goto nobufs_unlock;
46657 spin_unlock(&cookie->lock);
46658
46659- fscache_stat(&fscache_n_retrieval_ops);
46660+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46661
46662 /* pin the netfs read context in case we need to do the actual netfs
46663 * read because we've encountered a cache read failure */
46664@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46665
46666 error:
46667 if (ret == -ENOMEM)
46668- fscache_stat(&fscache_n_retrievals_nomem);
46669+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46670 else if (ret == -ERESTARTSYS)
46671- fscache_stat(&fscache_n_retrievals_intr);
46672+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46673 else if (ret == -ENODATA)
46674- fscache_stat(&fscache_n_retrievals_nodata);
46675+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46676 else if (ret < 0)
46677- fscache_stat(&fscache_n_retrievals_nobufs);
46678+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46679 else
46680- fscache_stat(&fscache_n_retrievals_ok);
46681+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46682
46683 fscache_put_retrieval(op);
46684 _leave(" = %d", ret);
46685@@ -429,7 +429,7 @@ nobufs_unlock:
46686 spin_unlock(&cookie->lock);
46687 kfree(op);
46688 nobufs:
46689- fscache_stat(&fscache_n_retrievals_nobufs);
46690+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46691 _leave(" = -ENOBUFS");
46692 return -ENOBUFS;
46693 }
46694@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46695
46696 _enter("%p,,%d,,,", cookie, *nr_pages);
46697
46698- fscache_stat(&fscache_n_retrievals);
46699+ fscache_stat_unchecked(&fscache_n_retrievals);
46700
46701 if (hlist_empty(&cookie->backing_objects))
46702 goto nobufs;
46703@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46704 goto nobufs_unlock;
46705 spin_unlock(&cookie->lock);
46706
46707- fscache_stat(&fscache_n_retrieval_ops);
46708+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46709
46710 /* pin the netfs read context in case we need to do the actual netfs
46711 * read because we've encountered a cache read failure */
46712@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46713
46714 error:
46715 if (ret == -ENOMEM)
46716- fscache_stat(&fscache_n_retrievals_nomem);
46717+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46718 else if (ret == -ERESTARTSYS)
46719- fscache_stat(&fscache_n_retrievals_intr);
46720+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46721 else if (ret == -ENODATA)
46722- fscache_stat(&fscache_n_retrievals_nodata);
46723+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46724 else if (ret < 0)
46725- fscache_stat(&fscache_n_retrievals_nobufs);
46726+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46727 else
46728- fscache_stat(&fscache_n_retrievals_ok);
46729+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46730
46731 fscache_put_retrieval(op);
46732 _leave(" = %d", ret);
46733@@ -545,7 +545,7 @@ nobufs_unlock:
46734 spin_unlock(&cookie->lock);
46735 kfree(op);
46736 nobufs:
46737- fscache_stat(&fscache_n_retrievals_nobufs);
46738+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46739 _leave(" = -ENOBUFS");
46740 return -ENOBUFS;
46741 }
46742@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46743
46744 _enter("%p,%p,,,", cookie, page);
46745
46746- fscache_stat(&fscache_n_allocs);
46747+ fscache_stat_unchecked(&fscache_n_allocs);
46748
46749 if (hlist_empty(&cookie->backing_objects))
46750 goto nobufs;
46751@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46752 goto nobufs_unlock;
46753 spin_unlock(&cookie->lock);
46754
46755- fscache_stat(&fscache_n_alloc_ops);
46756+ fscache_stat_unchecked(&fscache_n_alloc_ops);
46757
46758 ret = fscache_wait_for_retrieval_activation(
46759 object, op,
46760@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46761
46762 error:
46763 if (ret == -ERESTARTSYS)
46764- fscache_stat(&fscache_n_allocs_intr);
46765+ fscache_stat_unchecked(&fscache_n_allocs_intr);
46766 else if (ret < 0)
46767- fscache_stat(&fscache_n_allocs_nobufs);
46768+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46769 else
46770- fscache_stat(&fscache_n_allocs_ok);
46771+ fscache_stat_unchecked(&fscache_n_allocs_ok);
46772
46773 fscache_put_retrieval(op);
46774 _leave(" = %d", ret);
46775@@ -625,7 +625,7 @@ nobufs_unlock:
46776 spin_unlock(&cookie->lock);
46777 kfree(op);
46778 nobufs:
46779- fscache_stat(&fscache_n_allocs_nobufs);
46780+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46781 _leave(" = -ENOBUFS");
46782 return -ENOBUFS;
46783 }
46784@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46785
46786 spin_lock(&cookie->stores_lock);
46787
46788- fscache_stat(&fscache_n_store_calls);
46789+ fscache_stat_unchecked(&fscache_n_store_calls);
46790
46791 /* find a page to store */
46792 page = NULL;
46793@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46794 page = results[0];
46795 _debug("gang %d [%lx]", n, page->index);
46796 if (page->index > op->store_limit) {
46797- fscache_stat(&fscache_n_store_pages_over_limit);
46798+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46799 goto superseded;
46800 }
46801
46802@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46803 spin_unlock(&cookie->stores_lock);
46804 spin_unlock(&object->lock);
46805
46806- fscache_stat(&fscache_n_store_pages);
46807+ fscache_stat_unchecked(&fscache_n_store_pages);
46808 fscache_stat(&fscache_n_cop_write_page);
46809 ret = object->cache->ops->write_page(op, page);
46810 fscache_stat_d(&fscache_n_cop_write_page);
46811@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46812 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46813 ASSERT(PageFsCache(page));
46814
46815- fscache_stat(&fscache_n_stores);
46816+ fscache_stat_unchecked(&fscache_n_stores);
46817
46818 op = kzalloc(sizeof(*op), GFP_NOIO);
46819 if (!op)
46820@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46821 spin_unlock(&cookie->stores_lock);
46822 spin_unlock(&object->lock);
46823
46824- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46825+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46826 op->store_limit = object->store_limit;
46827
46828 if (fscache_submit_op(object, &op->op) < 0)
46829@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46830
46831 spin_unlock(&cookie->lock);
46832 radix_tree_preload_end();
46833- fscache_stat(&fscache_n_store_ops);
46834- fscache_stat(&fscache_n_stores_ok);
46835+ fscache_stat_unchecked(&fscache_n_store_ops);
46836+ fscache_stat_unchecked(&fscache_n_stores_ok);
46837
46838 /* the work queue now carries its own ref on the object */
46839 fscache_put_operation(&op->op);
46840@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46841 return 0;
46842
46843 already_queued:
46844- fscache_stat(&fscache_n_stores_again);
46845+ fscache_stat_unchecked(&fscache_n_stores_again);
46846 already_pending:
46847 spin_unlock(&cookie->stores_lock);
46848 spin_unlock(&object->lock);
46849 spin_unlock(&cookie->lock);
46850 radix_tree_preload_end();
46851 kfree(op);
46852- fscache_stat(&fscache_n_stores_ok);
46853+ fscache_stat_unchecked(&fscache_n_stores_ok);
46854 _leave(" = 0");
46855 return 0;
46856
46857@@ -851,14 +851,14 @@ nobufs:
46858 spin_unlock(&cookie->lock);
46859 radix_tree_preload_end();
46860 kfree(op);
46861- fscache_stat(&fscache_n_stores_nobufs);
46862+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
46863 _leave(" = -ENOBUFS");
46864 return -ENOBUFS;
46865
46866 nomem_free:
46867 kfree(op);
46868 nomem:
46869- fscache_stat(&fscache_n_stores_oom);
46870+ fscache_stat_unchecked(&fscache_n_stores_oom);
46871 _leave(" = -ENOMEM");
46872 return -ENOMEM;
46873 }
46874@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46875 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46876 ASSERTCMP(page, !=, NULL);
46877
46878- fscache_stat(&fscache_n_uncaches);
46879+ fscache_stat_unchecked(&fscache_n_uncaches);
46880
46881 /* cache withdrawal may beat us to it */
46882 if (!PageFsCache(page))
46883@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46884 unsigned long loop;
46885
46886 #ifdef CONFIG_FSCACHE_STATS
46887- atomic_add(pagevec->nr, &fscache_n_marks);
46888+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46889 #endif
46890
46891 for (loop = 0; loop < pagevec->nr; loop++) {
46892diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46893index 4765190..2a067f2 100644
46894--- a/fs/fscache/stats.c
46895+++ b/fs/fscache/stats.c
46896@@ -18,95 +18,95 @@
46897 /*
46898 * operation counters
46899 */
46900-atomic_t fscache_n_op_pend;
46901-atomic_t fscache_n_op_run;
46902-atomic_t fscache_n_op_enqueue;
46903-atomic_t fscache_n_op_requeue;
46904-atomic_t fscache_n_op_deferred_release;
46905-atomic_t fscache_n_op_release;
46906-atomic_t fscache_n_op_gc;
46907-atomic_t fscache_n_op_cancelled;
46908-atomic_t fscache_n_op_rejected;
46909+atomic_unchecked_t fscache_n_op_pend;
46910+atomic_unchecked_t fscache_n_op_run;
46911+atomic_unchecked_t fscache_n_op_enqueue;
46912+atomic_unchecked_t fscache_n_op_requeue;
46913+atomic_unchecked_t fscache_n_op_deferred_release;
46914+atomic_unchecked_t fscache_n_op_release;
46915+atomic_unchecked_t fscache_n_op_gc;
46916+atomic_unchecked_t fscache_n_op_cancelled;
46917+atomic_unchecked_t fscache_n_op_rejected;
46918
46919-atomic_t fscache_n_attr_changed;
46920-atomic_t fscache_n_attr_changed_ok;
46921-atomic_t fscache_n_attr_changed_nobufs;
46922-atomic_t fscache_n_attr_changed_nomem;
46923-atomic_t fscache_n_attr_changed_calls;
46924+atomic_unchecked_t fscache_n_attr_changed;
46925+atomic_unchecked_t fscache_n_attr_changed_ok;
46926+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46927+atomic_unchecked_t fscache_n_attr_changed_nomem;
46928+atomic_unchecked_t fscache_n_attr_changed_calls;
46929
46930-atomic_t fscache_n_allocs;
46931-atomic_t fscache_n_allocs_ok;
46932-atomic_t fscache_n_allocs_wait;
46933-atomic_t fscache_n_allocs_nobufs;
46934-atomic_t fscache_n_allocs_intr;
46935-atomic_t fscache_n_allocs_object_dead;
46936-atomic_t fscache_n_alloc_ops;
46937-atomic_t fscache_n_alloc_op_waits;
46938+atomic_unchecked_t fscache_n_allocs;
46939+atomic_unchecked_t fscache_n_allocs_ok;
46940+atomic_unchecked_t fscache_n_allocs_wait;
46941+atomic_unchecked_t fscache_n_allocs_nobufs;
46942+atomic_unchecked_t fscache_n_allocs_intr;
46943+atomic_unchecked_t fscache_n_allocs_object_dead;
46944+atomic_unchecked_t fscache_n_alloc_ops;
46945+atomic_unchecked_t fscache_n_alloc_op_waits;
46946
46947-atomic_t fscache_n_retrievals;
46948-atomic_t fscache_n_retrievals_ok;
46949-atomic_t fscache_n_retrievals_wait;
46950-atomic_t fscache_n_retrievals_nodata;
46951-atomic_t fscache_n_retrievals_nobufs;
46952-atomic_t fscache_n_retrievals_intr;
46953-atomic_t fscache_n_retrievals_nomem;
46954-atomic_t fscache_n_retrievals_object_dead;
46955-atomic_t fscache_n_retrieval_ops;
46956-atomic_t fscache_n_retrieval_op_waits;
46957+atomic_unchecked_t fscache_n_retrievals;
46958+atomic_unchecked_t fscache_n_retrievals_ok;
46959+atomic_unchecked_t fscache_n_retrievals_wait;
46960+atomic_unchecked_t fscache_n_retrievals_nodata;
46961+atomic_unchecked_t fscache_n_retrievals_nobufs;
46962+atomic_unchecked_t fscache_n_retrievals_intr;
46963+atomic_unchecked_t fscache_n_retrievals_nomem;
46964+atomic_unchecked_t fscache_n_retrievals_object_dead;
46965+atomic_unchecked_t fscache_n_retrieval_ops;
46966+atomic_unchecked_t fscache_n_retrieval_op_waits;
46967
46968-atomic_t fscache_n_stores;
46969-atomic_t fscache_n_stores_ok;
46970-atomic_t fscache_n_stores_again;
46971-atomic_t fscache_n_stores_nobufs;
46972-atomic_t fscache_n_stores_oom;
46973-atomic_t fscache_n_store_ops;
46974-atomic_t fscache_n_store_calls;
46975-atomic_t fscache_n_store_pages;
46976-atomic_t fscache_n_store_radix_deletes;
46977-atomic_t fscache_n_store_pages_over_limit;
46978+atomic_unchecked_t fscache_n_stores;
46979+atomic_unchecked_t fscache_n_stores_ok;
46980+atomic_unchecked_t fscache_n_stores_again;
46981+atomic_unchecked_t fscache_n_stores_nobufs;
46982+atomic_unchecked_t fscache_n_stores_oom;
46983+atomic_unchecked_t fscache_n_store_ops;
46984+atomic_unchecked_t fscache_n_store_calls;
46985+atomic_unchecked_t fscache_n_store_pages;
46986+atomic_unchecked_t fscache_n_store_radix_deletes;
46987+atomic_unchecked_t fscache_n_store_pages_over_limit;
46988
46989-atomic_t fscache_n_store_vmscan_not_storing;
46990-atomic_t fscache_n_store_vmscan_gone;
46991-atomic_t fscache_n_store_vmscan_busy;
46992-atomic_t fscache_n_store_vmscan_cancelled;
46993+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46994+atomic_unchecked_t fscache_n_store_vmscan_gone;
46995+atomic_unchecked_t fscache_n_store_vmscan_busy;
46996+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46997
46998-atomic_t fscache_n_marks;
46999-atomic_t fscache_n_uncaches;
47000+atomic_unchecked_t fscache_n_marks;
47001+atomic_unchecked_t fscache_n_uncaches;
47002
47003-atomic_t fscache_n_acquires;
47004-atomic_t fscache_n_acquires_null;
47005-atomic_t fscache_n_acquires_no_cache;
47006-atomic_t fscache_n_acquires_ok;
47007-atomic_t fscache_n_acquires_nobufs;
47008-atomic_t fscache_n_acquires_oom;
47009+atomic_unchecked_t fscache_n_acquires;
47010+atomic_unchecked_t fscache_n_acquires_null;
47011+atomic_unchecked_t fscache_n_acquires_no_cache;
47012+atomic_unchecked_t fscache_n_acquires_ok;
47013+atomic_unchecked_t fscache_n_acquires_nobufs;
47014+atomic_unchecked_t fscache_n_acquires_oom;
fe2de317
MT
47015
47016-atomic_t fscache_n_updates;
47017-atomic_t fscache_n_updates_null;
47018-atomic_t fscache_n_updates_run;
47019+atomic_unchecked_t fscache_n_updates;
47020+atomic_unchecked_t fscache_n_updates_null;
47021+atomic_unchecked_t fscache_n_updates_run;
47022
47023-atomic_t fscache_n_relinquishes;
47024-atomic_t fscache_n_relinquishes_null;
47025-atomic_t fscache_n_relinquishes_waitcrt;
47026-atomic_t fscache_n_relinquishes_retire;
47027+atomic_unchecked_t fscache_n_relinquishes;
47028+atomic_unchecked_t fscache_n_relinquishes_null;
47029+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47030+atomic_unchecked_t fscache_n_relinquishes_retire;
47031
47032-atomic_t fscache_n_cookie_index;
47033-atomic_t fscache_n_cookie_data;
47034-atomic_t fscache_n_cookie_special;
47035+atomic_unchecked_t fscache_n_cookie_index;
47036+atomic_unchecked_t fscache_n_cookie_data;
47037+atomic_unchecked_t fscache_n_cookie_special;
47038
47039-atomic_t fscache_n_object_alloc;
47040-atomic_t fscache_n_object_no_alloc;
47041-atomic_t fscache_n_object_lookups;
47042-atomic_t fscache_n_object_lookups_negative;
47043-atomic_t fscache_n_object_lookups_positive;
47044-atomic_t fscache_n_object_lookups_timed_out;
47045-atomic_t fscache_n_object_created;
47046-atomic_t fscache_n_object_avail;
47047-atomic_t fscache_n_object_dead;
47048+atomic_unchecked_t fscache_n_object_alloc;
47049+atomic_unchecked_t fscache_n_object_no_alloc;
47050+atomic_unchecked_t fscache_n_object_lookups;
47051+atomic_unchecked_t fscache_n_object_lookups_negative;
47052+atomic_unchecked_t fscache_n_object_lookups_positive;
47053+atomic_unchecked_t fscache_n_object_lookups_timed_out;
47054+atomic_unchecked_t fscache_n_object_created;
47055+atomic_unchecked_t fscache_n_object_avail;
47056+atomic_unchecked_t fscache_n_object_dead;
47057
47058-atomic_t fscache_n_checkaux_none;
47059-atomic_t fscache_n_checkaux_okay;
47060-atomic_t fscache_n_checkaux_update;
47061-atomic_t fscache_n_checkaux_obsolete;
47062+atomic_unchecked_t fscache_n_checkaux_none;
47063+atomic_unchecked_t fscache_n_checkaux_okay;
47064+atomic_unchecked_t fscache_n_checkaux_update;
47065+atomic_unchecked_t fscache_n_checkaux_obsolete;
47066
47067 atomic_t fscache_n_cop_alloc_object;
47068 atomic_t fscache_n_cop_lookup_object;
47069@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
47070 seq_puts(m, "FS-Cache statistics\n");
47071
47072 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
47073- atomic_read(&fscache_n_cookie_index),
47074- atomic_read(&fscache_n_cookie_data),
47075- atomic_read(&fscache_n_cookie_special));
47076+ atomic_read_unchecked(&fscache_n_cookie_index),
47077+ atomic_read_unchecked(&fscache_n_cookie_data),
47078+ atomic_read_unchecked(&fscache_n_cookie_special));
47079
47080 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
47081- atomic_read(&fscache_n_object_alloc),
47082- atomic_read(&fscache_n_object_no_alloc),
47083- atomic_read(&fscache_n_object_avail),
47084- atomic_read(&fscache_n_object_dead));
47085+ atomic_read_unchecked(&fscache_n_object_alloc),
47086+ atomic_read_unchecked(&fscache_n_object_no_alloc),
47087+ atomic_read_unchecked(&fscache_n_object_avail),
47088+ atomic_read_unchecked(&fscache_n_object_dead));
47089 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
47090- atomic_read(&fscache_n_checkaux_none),
47091- atomic_read(&fscache_n_checkaux_okay),
47092- atomic_read(&fscache_n_checkaux_update),
47093- atomic_read(&fscache_n_checkaux_obsolete));
47094+ atomic_read_unchecked(&fscache_n_checkaux_none),
47095+ atomic_read_unchecked(&fscache_n_checkaux_okay),
47096+ atomic_read_unchecked(&fscache_n_checkaux_update),
47097+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
47098
47099 seq_printf(m, "Pages : mrk=%u unc=%u\n",
47100- atomic_read(&fscache_n_marks),
47101- atomic_read(&fscache_n_uncaches));
47102+ atomic_read_unchecked(&fscache_n_marks),
47103+ atomic_read_unchecked(&fscache_n_uncaches));
47104
47105 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
47106 " oom=%u\n",
47107- atomic_read(&fscache_n_acquires),
47108- atomic_read(&fscache_n_acquires_null),
47109- atomic_read(&fscache_n_acquires_no_cache),
47110- atomic_read(&fscache_n_acquires_ok),
47111- atomic_read(&fscache_n_acquires_nobufs),
47112- atomic_read(&fscache_n_acquires_oom));
47113+ atomic_read_unchecked(&fscache_n_acquires),
47114+ atomic_read_unchecked(&fscache_n_acquires_null),
47115+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
47116+ atomic_read_unchecked(&fscache_n_acquires_ok),
47117+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
47118+ atomic_read_unchecked(&fscache_n_acquires_oom));
47119
47120 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
47121- atomic_read(&fscache_n_object_lookups),
47122- atomic_read(&fscache_n_object_lookups_negative),
47123- atomic_read(&fscache_n_object_lookups_positive),
47124- atomic_read(&fscache_n_object_created),
47125- atomic_read(&fscache_n_object_lookups_timed_out));
47126+ atomic_read_unchecked(&fscache_n_object_lookups),
47127+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
47128+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
47129+ atomic_read_unchecked(&fscache_n_object_created),
47130+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
47131
47132 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
47133- atomic_read(&fscache_n_updates),
47134- atomic_read(&fscache_n_updates_null),
47135- atomic_read(&fscache_n_updates_run));
47136+ atomic_read_unchecked(&fscache_n_updates),
47137+ atomic_read_unchecked(&fscache_n_updates_null),
47138+ atomic_read_unchecked(&fscache_n_updates_run));
47139
47140 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
47141- atomic_read(&fscache_n_relinquishes),
47142- atomic_read(&fscache_n_relinquishes_null),
47143- atomic_read(&fscache_n_relinquishes_waitcrt),
47144- atomic_read(&fscache_n_relinquishes_retire));
47145+ atomic_read_unchecked(&fscache_n_relinquishes),
47146+ atomic_read_unchecked(&fscache_n_relinquishes_null),
47147+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
47148+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
47149
47150 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
47151- atomic_read(&fscache_n_attr_changed),
47152- atomic_read(&fscache_n_attr_changed_ok),
47153- atomic_read(&fscache_n_attr_changed_nobufs),
47154- atomic_read(&fscache_n_attr_changed_nomem),
47155- atomic_read(&fscache_n_attr_changed_calls));
47156+ atomic_read_unchecked(&fscache_n_attr_changed),
47157+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
47158+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
47159+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
47160+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
47161
47162 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
47163- atomic_read(&fscache_n_allocs),
47164- atomic_read(&fscache_n_allocs_ok),
47165- atomic_read(&fscache_n_allocs_wait),
47166- atomic_read(&fscache_n_allocs_nobufs),
47167- atomic_read(&fscache_n_allocs_intr));
47168+ atomic_read_unchecked(&fscache_n_allocs),
47169+ atomic_read_unchecked(&fscache_n_allocs_ok),
47170+ atomic_read_unchecked(&fscache_n_allocs_wait),
47171+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
47172+ atomic_read_unchecked(&fscache_n_allocs_intr));
47173 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
47174- atomic_read(&fscache_n_alloc_ops),
47175- atomic_read(&fscache_n_alloc_op_waits),
47176- atomic_read(&fscache_n_allocs_object_dead));
47177+ atomic_read_unchecked(&fscache_n_alloc_ops),
47178+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
47179+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
47180
47181 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
47182 " int=%u oom=%u\n",
47183- atomic_read(&fscache_n_retrievals),
47184- atomic_read(&fscache_n_retrievals_ok),
47185- atomic_read(&fscache_n_retrievals_wait),
47186- atomic_read(&fscache_n_retrievals_nodata),
47187- atomic_read(&fscache_n_retrievals_nobufs),
47188- atomic_read(&fscache_n_retrievals_intr),
47189- atomic_read(&fscache_n_retrievals_nomem));
47190+ atomic_read_unchecked(&fscache_n_retrievals),
47191+ atomic_read_unchecked(&fscache_n_retrievals_ok),
47192+ atomic_read_unchecked(&fscache_n_retrievals_wait),
47193+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
47194+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
47195+ atomic_read_unchecked(&fscache_n_retrievals_intr),
47196+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
47197 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
47198- atomic_read(&fscache_n_retrieval_ops),
47199- atomic_read(&fscache_n_retrieval_op_waits),
47200- atomic_read(&fscache_n_retrievals_object_dead));
47201+ atomic_read_unchecked(&fscache_n_retrieval_ops),
47202+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
47203+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
47204
47205 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
47206- atomic_read(&fscache_n_stores),
47207- atomic_read(&fscache_n_stores_ok),
47208- atomic_read(&fscache_n_stores_again),
47209- atomic_read(&fscache_n_stores_nobufs),
47210- atomic_read(&fscache_n_stores_oom));
47211+ atomic_read_unchecked(&fscache_n_stores),
47212+ atomic_read_unchecked(&fscache_n_stores_ok),
47213+ atomic_read_unchecked(&fscache_n_stores_again),
47214+ atomic_read_unchecked(&fscache_n_stores_nobufs),
47215+ atomic_read_unchecked(&fscache_n_stores_oom));
47216 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
47217- atomic_read(&fscache_n_store_ops),
47218- atomic_read(&fscache_n_store_calls),
47219- atomic_read(&fscache_n_store_pages),
47220- atomic_read(&fscache_n_store_radix_deletes),
47221- atomic_read(&fscache_n_store_pages_over_limit));
47222+ atomic_read_unchecked(&fscache_n_store_ops),
47223+ atomic_read_unchecked(&fscache_n_store_calls),
47224+ atomic_read_unchecked(&fscache_n_store_pages),
47225+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
47226+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
47227
47228 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
47229- atomic_read(&fscache_n_store_vmscan_not_storing),
47230- atomic_read(&fscache_n_store_vmscan_gone),
47231- atomic_read(&fscache_n_store_vmscan_busy),
47232- atomic_read(&fscache_n_store_vmscan_cancelled));
47233+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
47234+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
47235+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
47236+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
47237
47238 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
47239- atomic_read(&fscache_n_op_pend),
47240- atomic_read(&fscache_n_op_run),
47241- atomic_read(&fscache_n_op_enqueue),
47242- atomic_read(&fscache_n_op_cancelled),
47243- atomic_read(&fscache_n_op_rejected));
47244+ atomic_read_unchecked(&fscache_n_op_pend),
47245+ atomic_read_unchecked(&fscache_n_op_run),
47246+ atomic_read_unchecked(&fscache_n_op_enqueue),
47247+ atomic_read_unchecked(&fscache_n_op_cancelled),
47248+ atomic_read_unchecked(&fscache_n_op_rejected));
47249 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
47250- atomic_read(&fscache_n_op_deferred_release),
47251- atomic_read(&fscache_n_op_release),
47252- atomic_read(&fscache_n_op_gc));
47253+ atomic_read_unchecked(&fscache_n_op_deferred_release),
47254+ atomic_read_unchecked(&fscache_n_op_release),
47255+ atomic_read_unchecked(&fscache_n_op_gc));
47256
47257 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
47258 atomic_read(&fscache_n_cop_alloc_object),
47259diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
47260index 3426521..3b75162 100644
47261--- a/fs/fuse/cuse.c
47262+++ b/fs/fuse/cuse.c
47263@@ -587,10 +587,12 @@ static int __init cuse_init(void)
47264 INIT_LIST_HEAD(&cuse_conntbl[i]);
47265
47266 /* inherit and extend fuse_dev_operations */
47267- cuse_channel_fops = fuse_dev_operations;
47268- cuse_channel_fops.owner = THIS_MODULE;
47269- cuse_channel_fops.open = cuse_channel_open;
47270- cuse_channel_fops.release = cuse_channel_release;
47271+ pax_open_kernel();
47272+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
47273+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
47274+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
47275+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
47276+ pax_close_kernel();
47277
47278 cuse_class = class_create(THIS_MODULE, "cuse");
47279 if (IS_ERR(cuse_class))
47280diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
47281index 5f3368a..8306426 100644
47282--- a/fs/fuse/dev.c
47283+++ b/fs/fuse/dev.c
47284@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
47285 ret = 0;
47286 pipe_lock(pipe);
47287
47288- if (!pipe->readers) {
47289+ if (!atomic_read(&pipe->readers)) {
47290 send_sig(SIGPIPE, current, 0);
47291 if (!ret)
47292 ret = -EPIPE;
47293diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
47294index 2066328..f5add3b 100644
47295--- a/fs/fuse/dir.c
47296+++ b/fs/fuse/dir.c
47297@@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
47298 return link;
47299 }
47300
47301-static void free_link(char *link)
47302+static void free_link(const char *link)
47303 {
47304 if (!IS_ERR(link))
47305 free_page((unsigned long) link);
47306diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
47307index 5698746..6086012 100644
47308--- a/fs/gfs2/inode.c
47309+++ b/fs/gfs2/inode.c
5e856224 47310@@ -1487,7 +1487,7 @@ out:
66a7e928
MT
47311
47312 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47313 {
47314- char *s = nd_get_link(nd);
47315+ const char *s = nd_get_link(nd);
47316 if (!IS_ERR(s))
47317 kfree(s);
47318 }
fe2de317 47319diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
5e856224 47320index c60267e..193d9e4 100644
fe2de317
MT
47321--- a/fs/hugetlbfs/inode.c
47322+++ b/fs/hugetlbfs/inode.c
5e856224 47323@@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = {
df50ba0c
MT
47324 .kill_sb = kill_litter_super,
47325 };
47326
47327-static struct vfsmount *hugetlbfs_vfsmount;
47328+struct vfsmount *hugetlbfs_vfsmount;
47329
47330 static int can_do_hugetlb_shm(void)
47331 {
fe2de317 47332diff --git a/fs/inode.c b/fs/inode.c
5e856224 47333index 83ab215..8842101 100644
fe2de317
MT
47334--- a/fs/inode.c
47335+++ b/fs/inode.c
5e856224 47336@@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
58c5fc13 47337
71d190be
MT
47338 #ifdef CONFIG_SMP
47339 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
47340- static atomic_t shared_last_ino;
47341- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
47342+ static atomic_unchecked_t shared_last_ino;
47343+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
58c5fc13 47344
71d190be
MT
47345 res = next - LAST_INO_BATCH;
47346 }
fe2de317 47347diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
5e856224 47348index eafb8d3..f423d37 100644
fe2de317
MT
47349--- a/fs/jffs2/erase.c
47350+++ b/fs/jffs2/erase.c
5e856224 47351@@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
58c5fc13
MT
47352 struct jffs2_unknown_node marker = {
47353 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47354 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47355- .totlen = cpu_to_je32(c->cleanmarker_size)
47356+ .totlen = cpu_to_je32(c->cleanmarker_size),
47357+ .hdr_crc = cpu_to_je32(0)
47358 };
47359
47360 jffs2_prealloc_raw_node_refs(c, jeb, 1);
fe2de317 47361diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
5e856224 47362index 30e8f47..21f600c 100644
fe2de317
MT
47363--- a/fs/jffs2/wbuf.c
47364+++ b/fs/jffs2/wbuf.c
5e856224 47365@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
58c5fc13
MT
47366 {
47367 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47368 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47369- .totlen = constant_cpu_to_je32(8)
47370+ .totlen = constant_cpu_to_je32(8),
47371+ .hdr_crc = constant_cpu_to_je32(0)
47372 };
47373
47374 /*
fe2de317 47375diff --git a/fs/jfs/super.c b/fs/jfs/super.c
5e856224 47376index 682bca6..86b8e6e 100644
fe2de317
MT
47377--- a/fs/jfs/super.c
47378+++ b/fs/jfs/super.c
5e856224 47379@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
15a11c5b
MT
47380
47381 jfs_inode_cachep =
47382 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47383- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47384+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47385 init_once);
47386 if (jfs_inode_cachep == NULL)
47387 return -ENOMEM;
fe2de317 47388diff --git a/fs/libfs.c b/fs/libfs.c
5e856224 47389index 5b2dbb3..7442d54 100644
fe2de317
MT
47390--- a/fs/libfs.c
47391+++ b/fs/libfs.c
47392@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
66a7e928
MT
47393
47394 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47395 struct dentry *next;
47396+ char d_name[sizeof(next->d_iname)];
47397+ const unsigned char *name;
47398+
47399 next = list_entry(p, struct dentry, d_u.d_child);
47400 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47401 if (!simple_positive(next)) {
fe2de317 47402@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
71d190be
MT
47403
47404 spin_unlock(&next->d_lock);
47405 spin_unlock(&dentry->d_lock);
47406- if (filldir(dirent, next->d_name.name,
66a7e928
MT
47407+ name = next->d_name.name;
47408+ if (name == next->d_iname) {
47409+ memcpy(d_name, name, next->d_name.len);
71d190be 47410+ name = d_name;
66a7e928 47411+ }
71d190be
MT
47412+ if (filldir(dirent, name,
47413 next->d_name.len, filp->f_pos,
47414 next->d_inode->i_ino,
47415 dt_type(next->d_inode)) < 0)
fe2de317 47416diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
4c928ab7 47417index 8392cb8..80d6193 100644
fe2de317
MT
47418--- a/fs/lockd/clntproc.c
47419+++ b/fs/lockd/clntproc.c
47420@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
8308f9c9
MT
47421 /*
47422 * Cookie counter for NLM requests
47423 */
47424-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47425+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47426
47427 void nlmclnt_next_cookie(struct nlm_cookie *c)
47428 {
47429- u32 cookie = atomic_inc_return(&nlm_cookie);
47430+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47431
47432 memcpy(c->data, &cookie, 4);
47433 c->len=4;
fe2de317 47434diff --git a/fs/locks.c b/fs/locks.c
5e856224 47435index 0d68f1f..f216b79 100644
fe2de317
MT
47436--- a/fs/locks.c
47437+++ b/fs/locks.c
5e856224 47438@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
58c5fc13
MT
47439 return;
47440
47441 if (filp->f_op && filp->f_op->flock) {
47442- struct file_lock fl = {
47443+ struct file_lock flock = {
47444 .fl_pid = current->tgid,
47445 .fl_file = filp,
47446 .fl_flags = FL_FLOCK,
47447 .fl_type = F_UNLCK,
47448 .fl_end = OFFSET_MAX,
47449 };
47450- filp->f_op->flock(filp, F_SETLKW, &fl);
47451- if (fl.fl_ops && fl.fl_ops->fl_release_private)
47452- fl.fl_ops->fl_release_private(&fl);
47453+ filp->f_op->flock(filp, F_SETLKW, &flock);
47454+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
47455+ flock.fl_ops->fl_release_private(&flock);
47456 }
47457
bc901d79 47458 lock_flocks();
fe2de317 47459diff --git a/fs/namei.c b/fs/namei.c
5e856224 47460index 46ea9cc..c7cf3a3 100644
fe2de317
MT
47461--- a/fs/namei.c
47462+++ b/fs/namei.c
5e856224 47463@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
fe2de317
MT
47464 if (ret != -EACCES)
47465 return ret;
47466
47467+#ifdef CONFIG_GRKERNSEC
47468+ /* we'll block if we have to log due to a denied capability use */
47469+ if (mask & MAY_NOT_BLOCK)
47470+ return -ECHILD;
47471+#endif
47472+
6e9df6a3
MT
47473 if (S_ISDIR(inode->i_mode)) {
47474 /* DACs are overridable for directories */
47475- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47476- return 0;
47477 if (!(mask & MAY_WRITE))
fe2de317
MT
47478- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47479+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47480+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
47481 return 0;
47482+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47483+ return 0;
47484 return -EACCES;
47485 }
bc901d79 47486 /*
66a7e928 47487+ * Searching includes executable on directories, else just read.
6e9df6a3 47488+ */
66a7e928 47489+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 47490+ if (mask == MAY_READ)
fe2de317
MT
47491+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47492+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
47493+ return 0;
47494+
47495+ /*
47496 * Read/write DACs are always overridable.
47497 * Executable DACs are overridable when there is
47498 * at least one exec bit set.
5e856224 47499@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
6e9df6a3 47500 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
bc901d79
MT
47501 return 0;
47502
6e9df6a3 47503- /*
66a7e928 47504- * Searching includes executable on directories, else just read.
6e9df6a3 47505- */
66a7e928 47506- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 47507- if (mask == MAY_READ)
66a7e928 47508- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
47509- return 0;
47510-
bc901d79
MT
47511 return -EACCES;
47512 }
bc901d79 47513
5e856224 47514@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
66a7e928
MT
47515 return error;
47516 }
47517
47518+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
47519+ dentry->d_inode, dentry, nd->path.mnt)) {
47520+ error = -EACCES;
47521+ *p = ERR_PTR(error); /* no ->put_link(), please */
47522+ path_put(&nd->path);
47523+ return error;
47524+ }
47525+
47526 nd->last_type = LAST_BIND;
df50ba0c
MT
47527 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47528 error = PTR_ERR(*p);
47529 if (!IS_ERR(*p)) {
58c5fc13
MT
47530- char *s = nd_get_link(nd);
47531+ const char *s = nd_get_link(nd);
47532 error = 0;
47533 if (s)
47534 error = __vfs_follow_link(nd, s);
5e856224 47535@@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
6e9df6a3
MT
47536 if (!err)
47537 err = complete_walk(nd);
47538
fe2de317
MT
47539+ if (!(nd->flags & LOOKUP_PARENT)) {
47540+#ifdef CONFIG_GRKERNSEC
47541+ if (flags & LOOKUP_RCU) {
47542+ if (!err)
47543+ path_put(&nd->path);
47544+ err = -ECHILD;
47545+ } else
47546+#endif
47547+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47548+ if (!err)
47549+ path_put(&nd->path);
47550+ err = -ENOENT;
47551+ }
6e9df6a3
MT
47552+ }
47553+
47554 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47555 if (!nd->inode->i_op->lookup) {
47556 path_put(&nd->path);
5e856224 47557@@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
66a7e928 47558 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
16454cff
MT
47559
47560 if (likely(!retval)) {
fe2de317
MT
47561+ if (*name != '/' && nd->path.dentry && nd->inode) {
47562+#ifdef CONFIG_GRKERNSEC
47563+ if (flags & LOOKUP_RCU)
47564+ return -ECHILD;
47565+#endif
47566+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47567+ return -ENOENT;
47568+ }
16454cff
MT
47569+
47570 if (unlikely(!audit_dummy_context())) {
47571 if (nd->path.dentry && nd->inode)
47572 audit_inode(name, nd->path.dentry);
5e856224 47573@@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
4c928ab7
MT
47574 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47575 return -EPERM;
47576
47577+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47578+ return -EPERM;
47579+ if (gr_handle_rawio(inode))
47580+ return -EPERM;
47581+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47582+ return -EACCES;
bc901d79 47583+
4c928ab7 47584 return 0;
bc901d79
MT
47585 }
47586
5e856224 47587@@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6e9df6a3
MT
47588 error = complete_walk(nd);
47589 if (error)
47590 return ERR_PTR(error);
fe2de317
MT
47591+#ifdef CONFIG_GRKERNSEC
47592+ if (nd->flags & LOOKUP_RCU) {
47593+ error = -ECHILD;
47594+ goto exit;
47595+ }
47596+#endif
6e9df6a3
MT
47597+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47598+ error = -ENOENT;
47599+ goto exit;
47600+ }
47601 audit_inode(pathname, nd->path.dentry);
47602 if (open_flag & O_CREAT) {
47603 error = -EISDIR;
5e856224 47604@@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6e9df6a3
MT
47605 error = complete_walk(nd);
47606 if (error)
47607 return ERR_PTR(error);
fe2de317
MT
47608+#ifdef CONFIG_GRKERNSEC
47609+ if (nd->flags & LOOKUP_RCU) {
47610+ error = -ECHILD;
47611+ goto exit;
47612+ }
47613+#endif
6e9df6a3
MT
47614+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47615+ error = -ENOENT;
47616+ goto exit;
47617+ }
47618 audit_inode(pathname, dir);
47619 goto ok;
47620 }
5e856224 47621@@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
fe2de317 47622 error = complete_walk(nd);
6e9df6a3 47623 if (error)
4c928ab7 47624 return ERR_PTR(error);
fe2de317
MT
47625+#ifdef CONFIG_GRKERNSEC
47626+ if (nd->flags & LOOKUP_RCU) {
47627+ error = -ECHILD;
47628+ goto exit;
47629+ }
47630+#endif
6e9df6a3
MT
47631+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47632+ error = -ENOENT;
47633+ goto exit;
47634+ }
fe2de317 47635
6e9df6a3
MT
47636 error = -ENOTDIR;
47637 if (nd->flags & LOOKUP_DIRECTORY) {
5e856224 47638@@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
66a7e928
MT
47639 /* Negative dentry, just create the file */
47640 if (!dentry->d_inode) {
5e856224 47641 umode_t mode = op->mode;
66a7e928 47642+
6e9df6a3 47643+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
66a7e928
MT
47644+ error = -EACCES;
47645+ goto exit_mutex_unlock;
47646+ }
47647+
47648 if (!IS_POSIXACL(dir->d_inode))
47649 mode &= ~current_umask();
47650 /*
5e856224 47651@@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
66a7e928
MT
47652 error = vfs_create(dir->d_inode, dentry, mode, nd);
47653 if (error)
47654 goto exit_mutex_unlock;
47655+ else
47656+ gr_handle_create(path->dentry, path->mnt);
47657 mutex_unlock(&dir->d_inode->i_mutex);
47658 dput(nd->path.dentry);
47659 nd->path.dentry = dentry;
5e856224 47660@@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
58c5fc13
MT
47661 /*
47662 * It already exists.
47663 */
47664+
6e9df6a3
MT
47665+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47666+ error = -ENOENT;
47667+ goto exit_mutex_unlock;
47668+ }
47669+
bc901d79
MT
47670+ /* only check if O_CREAT is specified, all other checks need to go
47671+ into may_open */
6e9df6a3 47672+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
58c5fc13
MT
47673+ error = -EACCES;
47674+ goto exit_mutex_unlock;
47675+ }
47676+
47677 mutex_unlock(&dir->d_inode->i_mutex);
df50ba0c 47678 audit_inode(pathname, path->dentry);
58c5fc13 47679
5e856224 47680@@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
6e9df6a3
MT
47681 *path = nd.path;
47682 return dentry;
47683 eexist:
47684+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47685+ dput(dentry);
47686+ dentry = ERR_PTR(-ENOENT);
47687+ goto fail;
47688+ }
47689 dput(dentry);
47690 dentry = ERR_PTR(-EEXIST);
47691 fail:
5e856224 47692@@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
6e9df6a3
MT
47693 }
47694 EXPORT_SYMBOL(user_path_create);
47695
47696+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47697+{
47698+ char *tmp = getname(pathname);
47699+ struct dentry *res;
47700+ if (IS_ERR(tmp))
47701+ return ERR_CAST(tmp);
47702+ res = kern_path_create(dfd, tmp, path, is_dir);
47703+ if (IS_ERR(res))
47704+ putname(tmp);
47705+ else
47706+ *to = tmp;
47707+ return res;
47708+}
47709+
5e856224 47710 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
6e9df6a3
MT
47711 {
47712 int error = may_create(dir, dentry);
5e856224 47713@@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
6e9df6a3 47714 error = mnt_want_write(path.mnt);
58c5fc13
MT
47715 if (error)
47716 goto out_dput;
47717+
6e9df6a3 47718+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
58c5fc13 47719+ error = -EPERM;
6e9df6a3 47720+ goto out_drop_write;
58c5fc13
MT
47721+ }
47722+
6e9df6a3 47723+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
58c5fc13 47724+ error = -EACCES;
6e9df6a3 47725+ goto out_drop_write;
58c5fc13
MT
47726+ }
47727+
6e9df6a3 47728 error = security_path_mknod(&path, dentry, mode, dev);
58c5fc13 47729 if (error)
6e9df6a3 47730 goto out_drop_write;
5e856224 47731@@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
58c5fc13
MT
47732 }
47733 out_drop_write:
6e9df6a3 47734 mnt_drop_write(path.mnt);
58c5fc13
MT
47735+
47736+ if (!error)
6e9df6a3 47737+ gr_handle_create(dentry, path.mnt);
58c5fc13
MT
47738 out_dput:
47739 dput(dentry);
6e9df6a3 47740 mutex_unlock(&path.dentry->d_inode->i_mutex);
5e856224 47741@@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
6e9df6a3
MT
47742 error = mnt_want_write(path.mnt);
47743 if (error)
47744 goto out_dput;
47745+
47746+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
58c5fc13 47747+ error = -EACCES;
6e9df6a3 47748+ goto out_drop_write;
58c5fc13
MT
47749+ }
47750+
6e9df6a3
MT
47751 error = security_path_mkdir(&path, dentry, mode);
47752 if (error)
47753 goto out_drop_write;
47754 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
58c5fc13 47755 out_drop_write:
6e9df6a3 47756 mnt_drop_write(path.mnt);
58c5fc13
MT
47757+
47758+ if (!error)
6e9df6a3 47759+ gr_handle_create(dentry, path.mnt);
58c5fc13
MT
47760 out_dput:
47761 dput(dentry);
6e9df6a3 47762 mutex_unlock(&path.dentry->d_inode->i_mutex);
5e856224 47763@@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
58c5fc13
MT
47764 char * name;
47765 struct dentry *dentry;
47766 struct nameidata nd;
47767+ ino_t saved_ino = 0;
47768+ dev_t saved_dev = 0;
47769
47770 error = user_path_parent(dfd, pathname, &nd, &name);
47771 if (error)
5e856224 47772@@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
15a11c5b
MT
47773 error = -ENOENT;
47774 goto exit3;
47775 }
58c5fc13 47776+
6e9df6a3
MT
47777+ saved_ino = dentry->d_inode->i_ino;
47778+ saved_dev = gr_get_dev_from_dentry(dentry);
58c5fc13 47779+
15a11c5b
MT
47780+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47781+ error = -EACCES;
47782+ goto exit3;
58c5fc13
MT
47783+ }
47784+
47785 error = mnt_want_write(nd.path.mnt);
47786 if (error)
47787 goto exit3;
5e856224 47788@@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
58c5fc13
MT
47789 if (error)
47790 goto exit4;
47791 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47792+ if (!error && (saved_dev || saved_ino))
47793+ gr_handle_delete(saved_ino, saved_dev);
47794 exit4:
47795 mnt_drop_write(nd.path.mnt);
47796 exit3:
5e856224 47797@@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
58c5fc13
MT
47798 struct dentry *dentry;
47799 struct nameidata nd;
47800 struct inode *inode = NULL;
47801+ ino_t saved_ino = 0;
47802+ dev_t saved_dev = 0;
47803
47804 error = user_path_parent(dfd, pathname, &nd, &name);
47805 if (error)
5e856224 47806@@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
15a11c5b 47807 if (!inode)
58c5fc13 47808 goto slashes;
15a11c5b
MT
47809 ihold(inode);
47810+
47811+ if (inode->i_nlink <= 1) {
47812+ saved_ino = inode->i_ino;
47813+ saved_dev = gr_get_dev_from_dentry(dentry);
47814+ }
47815+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47816+ error = -EACCES;
47817+ goto exit2;
58c5fc13 47818+ }
15a11c5b 47819+
58c5fc13
MT
47820 error = mnt_want_write(nd.path.mnt);
47821 if (error)
47822 goto exit2;
5e856224 47823@@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
58c5fc13
MT
47824 if (error)
47825 goto exit3;
47826 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47827+ if (!error && (saved_ino || saved_dev))
47828+ gr_handle_delete(saved_ino, saved_dev);
47829 exit3:
47830 mnt_drop_write(nd.path.mnt);
47831 exit2:
5e856224 47832@@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
6e9df6a3
MT
47833 error = mnt_want_write(path.mnt);
47834 if (error)
47835 goto out_dput;
47836+
47837+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
58c5fc13 47838+ error = -EACCES;
6e9df6a3 47839+ goto out_drop_write;
58c5fc13
MT
47840+ }
47841+
6e9df6a3 47842 error = security_path_symlink(&path, dentry, from);
58c5fc13
MT
47843 if (error)
47844 goto out_drop_write;
6e9df6a3 47845 error = vfs_symlink(path.dentry->d_inode, dentry, from);
58c5fc13 47846+ if (!error)
6e9df6a3 47847+ gr_handle_create(dentry, path.mnt);
58c5fc13 47848 out_drop_write:
6e9df6a3 47849 mnt_drop_write(path.mnt);
58c5fc13 47850 out_dput:
5e856224 47851@@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
47852 {
47853 struct dentry *new_dentry;
47854 struct path old_path, new_path;
fe2de317 47855+ char *to = NULL;
6e9df6a3
MT
47856 int how = 0;
47857 int error;
47858
5e856224 47859@@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
47860 if (error)
47861 return error;
47862
47863- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47864+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
58c5fc13
MT
47865 error = PTR_ERR(new_dentry);
47866 if (IS_ERR(new_dentry))
6e9df6a3 47867 goto out;
5e856224 47868@@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
47869 error = mnt_want_write(new_path.mnt);
47870 if (error)
47871 goto out_dput;
58c5fc13
MT
47872+
47873+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47874+ old_path.dentry->d_inode,
47875+ old_path.dentry->d_inode->i_mode, to)) {
47876+ error = -EACCES;
6e9df6a3 47877+ goto out_drop_write;
58c5fc13
MT
47878+ }
47879+
6e9df6a3 47880+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
58c5fc13
MT
47881+ old_path.dentry, old_path.mnt, to)) {
47882+ error = -EACCES;
6e9df6a3 47883+ goto out_drop_write;
58c5fc13
MT
47884+ }
47885+
6e9df6a3 47886 error = security_path_link(old_path.dentry, &new_path, new_dentry);
58c5fc13
MT
47887 if (error)
47888 goto out_drop_write;
6e9df6a3 47889 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
58c5fc13 47890+ if (!error)
6e9df6a3 47891+ gr_handle_create(new_dentry, new_path.mnt);
58c5fc13 47892 out_drop_write:
6e9df6a3 47893 mnt_drop_write(new_path.mnt);
58c5fc13 47894 out_dput:
6e9df6a3
MT
47895+ putname(to);
47896 dput(new_dentry);
47897 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47898 path_put(&new_path);
5e856224 47899@@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
58c5fc13
MT
47900 if (new_dentry == trap)
47901 goto exit5;
47902
47903+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47904+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47905+ to);
47906+ if (error)
47907+ goto exit5;
47908+
47909 error = mnt_want_write(oldnd.path.mnt);
47910 if (error)
47911 goto exit5;
5e856224 47912@@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
58c5fc13
MT
47913 goto exit6;
47914 error = vfs_rename(old_dir->d_inode, old_dentry,
47915 new_dir->d_inode, new_dentry);
47916+ if (!error)
47917+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47918+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47919 exit6:
47920 mnt_drop_write(oldnd.path.mnt);
47921 exit5:
5e856224 47922@@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
71d190be
MT
47923
47924 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47925 {
47926+ char tmpbuf[64];
47927+ const char *newlink;
47928 int len;
47929
47930 len = PTR_ERR(link);
5e856224 47931@@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
71d190be
MT
47932 len = strlen(link);
47933 if (len > (unsigned) buflen)
47934 len = buflen;
47935- if (copy_to_user(buffer, link, len))
47936+
47937+ if (len < sizeof(tmpbuf)) {
47938+ memcpy(tmpbuf, link, len);
47939+ newlink = tmpbuf;
47940+ } else
47941+ newlink = link;
47942+
47943+ if (copy_to_user(buffer, newlink, len))
47944 len = -EFAULT;
47945 out:
47946 return len;
fe2de317 47947diff --git a/fs/namespace.c b/fs/namespace.c
5e856224 47948index e608199..9609cb9 100644
fe2de317
MT
47949--- a/fs/namespace.c
47950+++ b/fs/namespace.c
5e856224 47951@@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
58c5fc13
MT
47952 if (!(sb->s_flags & MS_RDONLY))
47953 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47954 up_write(&sb->s_umount);
47955+
47956+ gr_log_remount(mnt->mnt_devname, retval);
47957+
47958 return retval;
47959 }
47960
5e856224 47961@@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
6892158b 47962 br_write_unlock(vfsmount_lock);
58c5fc13
MT
47963 up_write(&namespace_sem);
47964 release_mounts(&umount_list);
47965+
47966+ gr_log_unmount(mnt->mnt_devname, retval);
47967+
47968 return retval;
47969 }
47970
5e856224 47971@@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
ae4e228f
MT
47972 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47973 MS_STRICTATIME);
58c5fc13 47974
ae4e228f
MT
47975+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47976+ retval = -EPERM;
47977+ goto dput_out;
47978+ }
47979+
58c5fc13
MT
47980+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47981+ retval = -EPERM;
47982+ goto dput_out;
47983+ }
47984+
47985 if (flags & MS_REMOUNT)
47986 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47987 data_page);
5e856224 47988@@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
58c5fc13
MT
47989 dev_name, data_page);
47990 dput_out:
47991 path_put(&path);
47992+
47993+ gr_log_mount(dev_name, dir_name, retval);
47994+
47995 return retval;
47996 }
47997
5e856224 47998@@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
66a7e928
MT
47999 if (error)
48000 goto out2;
58c5fc13
MT
48001
48002+ if (gr_handle_chroot_pivot()) {
48003+ error = -EPERM;
66a7e928 48004+ goto out2;
58c5fc13
MT
48005+ }
48006+
6892158b 48007 get_fs_root(current->fs, &root);
66a7e928
MT
48008 error = lock_mount(&old);
48009 if (error)
4c928ab7 48010diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
5e856224 48011index 32c0658..b1c2045e 100644
4c928ab7
MT
48012--- a/fs/ncpfs/ncplib_kernel.h
48013+++ b/fs/ncpfs/ncplib_kernel.h
48014@@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
48015 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
48016 const unsigned char *, unsigned int, int);
48017 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
48018- const unsigned char *, unsigned int, int);
48019+ const unsigned char *, unsigned int, int) __size_overflow(5);
48020
48021 #define NCP_ESC ':'
48022 #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
48023@@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
48024 int ncp__io2vol(unsigned char *, unsigned int *,
48025 const unsigned char *, unsigned int, int);
48026 int ncp__vol2io(unsigned char *, unsigned int *,
48027- const unsigned char *, unsigned int, int);
48028+ const unsigned char *, unsigned int, int) __size_overflow(5);
48029
48030 #define NCP_IO_TABLE(sb) NULL
48031 #define ncp_tolower(t, c) tolower(c)
fe2de317 48032diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
5e856224 48033index f649fba..236bf92 100644
fe2de317
MT
48034--- a/fs/nfs/inode.c
48035+++ b/fs/nfs/inode.c
5e856224 48036@@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
15a11c5b
MT
48037 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
48038 nfsi->attrtimeo_timestamp = jiffies;
48039
48040- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
48041+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
48042 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
48043 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
48044 else
5e856224 48045@@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
ae4e228f
MT
48046 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
48047 }
48048
48049-static atomic_long_t nfs_attr_generation_counter;
48050+static atomic_long_unchecked_t nfs_attr_generation_counter;
48051
48052 static unsigned long nfs_read_attr_generation_counter(void)
48053 {
48054- return atomic_long_read(&nfs_attr_generation_counter);
48055+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
48056 }
48057
48058 unsigned long nfs_inc_attr_generation_counter(void)
48059 {
48060- return atomic_long_inc_return(&nfs_attr_generation_counter);
48061+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
48062 }
48063
48064 void nfs_fattr_init(struct nfs_fattr *fattr)
fe2de317 48065diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
5e856224 48066index b96fe94..a4dbece 100644
fe2de317
MT
48067--- a/fs/nfsd/vfs.c
48068+++ b/fs/nfsd/vfs.c
5e856224 48069@@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
ae4e228f
MT
48070 } else {
48071 oldfs = get_fs();
48072 set_fs(KERNEL_DS);
48073- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 48074+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
ae4e228f
MT
48075 set_fs(oldfs);
48076 }
48077
5e856224 48078@@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
ae4e228f
MT
48079
48080 /* Write the data. */
48081 oldfs = get_fs(); set_fs(KERNEL_DS);
48082- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 48083+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
ae4e228f
MT
48084 set_fs(oldfs);
48085 if (host_err < 0)
48086 goto out_nfserr;
5e856224 48087@@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
ae4e228f 48088 */
58c5fc13 48089
ae4e228f
MT
48090 oldfs = get_fs(); set_fs(KERNEL_DS);
48091- host_err = inode->i_op->readlink(dentry, buf, *lenp);
6e9df6a3 48092+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
ae4e228f 48093 set_fs(oldfs);
58c5fc13 48094
ae4e228f 48095 if (host_err < 0)
fe2de317 48096diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
5e856224 48097index 3568c8a..e0240d8 100644
fe2de317
MT
48098--- a/fs/notify/fanotify/fanotify_user.c
48099+++ b/fs/notify/fanotify/fanotify_user.c
5e856224 48100@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
15a11c5b 48101 goto out_close_fd;
66a7e928 48102
15a11c5b
MT
48103 ret = -EFAULT;
48104- if (copy_to_user(buf, &fanotify_event_metadata,
48105+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
48106+ copy_to_user(buf, &fanotify_event_metadata,
48107 fanotify_event_metadata.event_len))
48108 goto out_kill_access_response;
48109
fe2de317
MT
48110diff --git a/fs/notify/notification.c b/fs/notify/notification.c
48111index ee18815..7aa5d01 100644
48112--- a/fs/notify/notification.c
48113+++ b/fs/notify/notification.c
48114@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
8308f9c9
MT
48115 * get set to 0 so it will never get 'freed'
48116 */
48117 static struct fsnotify_event *q_overflow_event;
48118-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48119+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48120
48121 /**
48122 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
fe2de317 48123@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
8308f9c9
MT
48124 */
48125 u32 fsnotify_get_cookie(void)
48126 {
48127- return atomic_inc_return(&fsnotify_sync_cookie);
48128+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
48129 }
48130 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
48131
fe2de317
MT
48132diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
48133index 99e3610..02c1068 100644
48134--- a/fs/ntfs/dir.c
48135+++ b/fs/ntfs/dir.c
6892158b
MT
48136@@ -1329,7 +1329,7 @@ find_next_index_buffer:
48137 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
48138 ~(s64)(ndir->itype.index.block_size - 1)));
48139 /* Bounds checks. */
48140- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48141+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48142 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48143 "inode 0x%lx or driver bug.", vdir->i_ino);
48144 goto err_out;
fe2de317
MT
48145diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
48146index c587e2d..3641eaa 100644
48147--- a/fs/ntfs/file.c
48148+++ b/fs/ntfs/file.c
48149@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
58c5fc13
MT
48150 #endif /* NTFS_RW */
48151 };
48152
48153-const struct file_operations ntfs_empty_file_ops = {};
ae4e228f 48154+const struct file_operations ntfs_empty_file_ops __read_only;
58c5fc13
MT
48155
48156-const struct inode_operations ntfs_empty_inode_ops = {};
ae4e228f 48157+const struct inode_operations ntfs_empty_inode_ops __read_only;
fe2de317
MT
48158diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
48159index 210c352..a174f83 100644
48160--- a/fs/ocfs2/localalloc.c
48161+++ b/fs/ocfs2/localalloc.c
48162@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
58c5fc13
MT
48163 goto bail;
48164 }
48165
48166- atomic_inc(&osb->alloc_stats.moves);
48167+ atomic_inc_unchecked(&osb->alloc_stats.moves);
48168
58c5fc13 48169 bail:
57199397 48170 if (handle)
fe2de317 48171diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
4c928ab7 48172index d355e6e..578d905 100644
fe2de317
MT
48173--- a/fs/ocfs2/ocfs2.h
48174+++ b/fs/ocfs2/ocfs2.h
66a7e928 48175@@ -235,11 +235,11 @@ enum ocfs2_vol_state
58c5fc13
MT
48176
48177 struct ocfs2_alloc_stats
48178 {
48179- atomic_t moves;
48180- atomic_t local_data;
48181- atomic_t bitmap_data;
48182- atomic_t bg_allocs;
48183- atomic_t bg_extends;
48184+ atomic_unchecked_t moves;
48185+ atomic_unchecked_t local_data;
48186+ atomic_unchecked_t bitmap_data;
48187+ atomic_unchecked_t bg_allocs;
48188+ atomic_unchecked_t bg_extends;
48189 };
48190
48191 enum ocfs2_local_alloc_state
fe2de317 48192diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
5e856224 48193index f169da4..9112253 100644
fe2de317
MT
48194--- a/fs/ocfs2/suballoc.c
48195+++ b/fs/ocfs2/suballoc.c
48196@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
58c5fc13
MT
48197 mlog_errno(status);
48198 goto bail;
48199 }
48200- atomic_inc(&osb->alloc_stats.bg_extends);
48201+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48202
48203 /* You should never ask for this much metadata */
48204 BUG_ON(bits_wanted >
fe2de317 48205@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
58c5fc13
MT
48206 mlog_errno(status);
48207 goto bail;
48208 }
57199397
MT
48209- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48210+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 48211
57199397
MT
48212 *suballoc_loc = res.sr_bg_blkno;
48213 *suballoc_bit_start = res.sr_bit_offset;
fe2de317 48214@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66a7e928
MT
48215 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
48216 res->sr_bits);
48217
48218- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48219+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48220
48221 BUG_ON(res->sr_bits != 1);
48222
fe2de317 48223@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
58c5fc13
MT
48224 mlog_errno(status);
48225 goto bail;
48226 }
57199397
MT
48227- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48228+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 48229
57199397 48230 BUG_ON(res.sr_bits != 1);
58c5fc13 48231
fe2de317 48232@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58c5fc13
MT
48233 cluster_start,
48234 num_clusters);
48235 if (!status)
48236- atomic_inc(&osb->alloc_stats.local_data);
48237+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
48238 } else {
48239 if (min_clusters > (osb->bitmap_cpg - 1)) {
48240 /* The only paths asking for contiguousness
fe2de317 48241@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58c5fc13 48242 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
57199397
MT
48243 res.sr_bg_blkno,
48244 res.sr_bit_offset);
58c5fc13
MT
48245- atomic_inc(&osb->alloc_stats.bitmap_data);
48246+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
57199397 48247 *num_clusters = res.sr_bits;
58c5fc13
MT
48248 }
48249 }
fe2de317 48250diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
5e856224 48251index 604e12c..8426483 100644
fe2de317
MT
48252--- a/fs/ocfs2/super.c
48253+++ b/fs/ocfs2/super.c
4c928ab7 48254@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
58c5fc13
MT
48255 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48256 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48257 "Stats",
48258- atomic_read(&osb->alloc_stats.bitmap_data),
48259- atomic_read(&osb->alloc_stats.local_data),
48260- atomic_read(&osb->alloc_stats.bg_allocs),
48261- atomic_read(&osb->alloc_stats.moves),
48262- atomic_read(&osb->alloc_stats.bg_extends));
48263+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48264+ atomic_read_unchecked(&osb->alloc_stats.local_data),
48265+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48266+ atomic_read_unchecked(&osb->alloc_stats.moves),
48267+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48268
48269 out += snprintf(buf + out, len - out,
48270 "%10s => State: %u Descriptor: %llu Size: %u bits "
5e856224 48271@@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58c5fc13 48272 spin_lock_init(&osb->osb_xattr_lock);
df50ba0c 48273 ocfs2_init_steal_slots(osb);
58c5fc13
MT
48274
48275- atomic_set(&osb->alloc_stats.moves, 0);
48276- atomic_set(&osb->alloc_stats.local_data, 0);
48277- atomic_set(&osb->alloc_stats.bitmap_data, 0);
48278- atomic_set(&osb->alloc_stats.bg_allocs, 0);
48279- atomic_set(&osb->alloc_stats.bg_extends, 0);
48280+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48281+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48282+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48283+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48284+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48285
48286 /* Copy the blockcheck stats from the superblock probe */
48287 osb->osb_ecc_stats = *stats;
fe2de317
MT
48288diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
48289index 5d22872..523db20 100644
48290--- a/fs/ocfs2/symlink.c
48291+++ b/fs/ocfs2/symlink.c
66a7e928 48292@@ -142,7 +142,7 @@ bail:
58c5fc13 48293
ae4e228f
MT
48294 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48295 {
48296- char *link = nd_get_link(nd);
48297+ const char *link = nd_get_link(nd);
48298 if (!IS_ERR(link))
48299 kfree(link);
58c5fc13 48300 }
fe2de317 48301diff --git a/fs/open.c b/fs/open.c
5e856224 48302index 77becc0..aad7bd9 100644
fe2de317
MT
48303--- a/fs/open.c
48304+++ b/fs/open.c
48305@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
bc901d79
MT
48306 error = locks_verify_truncate(inode, NULL, length);
48307 if (!error)
48308 error = security_path_truncate(&path);
58c5fc13 48309+
bc901d79
MT
48310+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48311+ error = -EACCES;
48312+
48313 if (!error)
48314 error = do_truncate(path.dentry, length, 0, NULL);
48315
fe2de317 48316@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
58c5fc13
MT
48317 if (__mnt_is_readonly(path.mnt))
48318 res = -EROFS;
48319
48320+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48321+ res = -EACCES;
48322+
48323 out_path_release:
48324 path_put(&path);
48325 out:
fe2de317 48326@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
58c5fc13
MT
48327 if (error)
48328 goto dput_and_out;
48329
48330+ gr_log_chdir(path.dentry, path.mnt);
48331+
48332 set_fs_pwd(current->fs, &path);
48333
48334 dput_and_out:
fe2de317 48335@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
58c5fc13
MT
48336 goto out_putf;
48337
6892158b 48338 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
58c5fc13
MT
48339+
48340+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48341+ error = -EPERM;
48342+
48343+ if (!error)
48344+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48345+
48346 if (!error)
48347 set_fs_pwd(current->fs, &file->f_path);
48348 out_putf:
fe2de317 48349@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
ae4e228f 48350 if (error)
58c5fc13
MT
48351 goto dput_and_out;
48352
48353+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48354+ goto dput_and_out;
58c5fc13
MT
48355+
48356 set_fs_root(current->fs, &path);
48357+
48358+ gr_handle_chroot_chdir(&path);
48359+
48360 error = 0;
48361 dput_and_out:
48362 path_put(&path);
fe2de317 48363@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
58c5fc13 48364 if (error)
6e9df6a3 48365 return error;
6892158b 48366 mutex_lock(&inode->i_mutex);
58c5fc13 48367+
4c928ab7 48368+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
58c5fc13 48369+ error = -EACCES;
6892158b 48370+ goto out_unlock;
58c5fc13 48371+ }
6e9df6a3 48372+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58c5fc13 48373+ error = -EACCES;
ae4e228f 48374+ goto out_unlock;
58c5fc13
MT
48375+ }
48376+
5e856224 48377 error = security_path_chmod(path, mode);
6e9df6a3
MT
48378 if (error)
48379 goto out_unlock;
fe2de317 48380@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
58c5fc13
MT
48381 int error;
48382 struct iattr newattrs;
48383
ae4e228f 48384+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
58c5fc13
MT
48385+ return -EACCES;
48386+
48387 newattrs.ia_valid = ATTR_CTIME;
48388 if (user != (uid_t) -1) {
48389 newattrs.ia_valid |= ATTR_UID;
fe2de317 48390diff --git a/fs/pipe.c b/fs/pipe.c
5e856224 48391index 82e651b..8a68573 100644
fe2de317
MT
48392--- a/fs/pipe.c
48393+++ b/fs/pipe.c
5e856224 48394@@ -437,9 +437,9 @@ redo:
ae4e228f
MT
48395 }
48396 if (bufs) /* More to do? */
48397 continue;
48398- if (!pipe->writers)
48399+ if (!atomic_read(&pipe->writers))
48400 break;
48401- if (!pipe->waiting_writers) {
48402+ if (!atomic_read(&pipe->waiting_writers)) {
48403 /* syscall merging: Usually we must not sleep
48404 * if O_NONBLOCK is set, or if we got some data.
48405 * But if a writer sleeps in kernel space, then
5e856224 48406@@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
ae4e228f
MT
48407 mutex_lock(&inode->i_mutex);
48408 pipe = inode->i_pipe;
48409
48410- if (!pipe->readers) {
48411+ if (!atomic_read(&pipe->readers)) {
48412 send_sig(SIGPIPE, current, 0);
48413 ret = -EPIPE;
48414 goto out;
5e856224 48415@@ -552,7 +552,7 @@ redo1:
ae4e228f
MT
48416 for (;;) {
48417 int bufs;
48418
48419- if (!pipe->readers) {
48420+ if (!atomic_read(&pipe->readers)) {
48421 send_sig(SIGPIPE, current, 0);
48422 if (!ret)
48423 ret = -EPIPE;
5e856224 48424@@ -643,9 +643,9 @@ redo2:
ae4e228f
MT
48425 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48426 do_wakeup = 0;
48427 }
48428- pipe->waiting_writers++;
48429+ atomic_inc(&pipe->waiting_writers);
48430 pipe_wait(pipe);
48431- pipe->waiting_writers--;
48432+ atomic_dec(&pipe->waiting_writers);
48433 }
48434 out:
48435 mutex_unlock(&inode->i_mutex);
5e856224 48436@@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
ae4e228f
MT
48437 mask = 0;
48438 if (filp->f_mode & FMODE_READ) {
48439 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48440- if (!pipe->writers && filp->f_version != pipe->w_counter)
48441+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48442 mask |= POLLHUP;
48443 }
48444
5e856224 48445@@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
ae4e228f
MT
48446 * Most Unices do not set POLLERR for FIFOs but on Linux they
48447 * behave exactly like pipes for poll().
48448 */
48449- if (!pipe->readers)
48450+ if (!atomic_read(&pipe->readers))
48451 mask |= POLLERR;
48452 }
48453
5e856224 48454@@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
ae4e228f
MT
48455
48456 mutex_lock(&inode->i_mutex);
48457 pipe = inode->i_pipe;
48458- pipe->readers -= decr;
48459- pipe->writers -= decw;
48460+ atomic_sub(decr, &pipe->readers);
48461+ atomic_sub(decw, &pipe->writers);
48462
48463- if (!pipe->readers && !pipe->writers) {
48464+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48465 free_pipe_info(inode);
48466 } else {
16454cff 48467 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
5e856224 48468@@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
ae4e228f
MT
48469
48470 if (inode->i_pipe) {
48471 ret = 0;
48472- inode->i_pipe->readers++;
48473+ atomic_inc(&inode->i_pipe->readers);
48474 }
48475
48476 mutex_unlock(&inode->i_mutex);
5e856224 48477@@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
ae4e228f
MT
48478
48479 if (inode->i_pipe) {
48480 ret = 0;
48481- inode->i_pipe->writers++;
48482+ atomic_inc(&inode->i_pipe->writers);
48483 }
48484
48485 mutex_unlock(&inode->i_mutex);
5e856224 48486@@ -864,9 +864,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
ae4e228f
MT
48487 if (inode->i_pipe) {
48488 ret = 0;
48489 if (filp->f_mode & FMODE_READ)
48490- inode->i_pipe->readers++;
48491+ atomic_inc(&inode->i_pipe->readers);
48492 if (filp->f_mode & FMODE_WRITE)
48493- inode->i_pipe->writers++;
48494+ atomic_inc(&inode->i_pipe->writers);
48495 }
48496
48497 mutex_unlock(&inode->i_mutex);
5e856224 48498@@ -958,7 +958,7 @@ void free_pipe_info(struct inode *inode)
58c5fc13
MT
48499 inode->i_pipe = NULL;
48500 }
48501
48502-static struct vfsmount *pipe_mnt __read_mostly;
48503+struct vfsmount *pipe_mnt __read_mostly;
ae4e228f
MT
48504
48505 /*
48506 * pipefs_dname() is called from d_path().
5e856224 48507@@ -988,7 +988,8 @@ static struct inode * get_pipe_inode(void)
ae4e228f
MT
48508 goto fail_iput;
48509 inode->i_pipe = pipe;
48510
48511- pipe->readers = pipe->writers = 1;
48512+ atomic_set(&pipe->readers, 1);
48513+ atomic_set(&pipe->writers, 1);
48514 inode->i_fop = &rdwr_pipefifo_fops;
48515
58c5fc13 48516 /*
fe2de317
MT
48517diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48518index 15af622..0e9f4467 100644
48519--- a/fs/proc/Kconfig
48520+++ b/fs/proc/Kconfig
48521@@ -30,12 +30,12 @@ config PROC_FS
48522
48523 config PROC_KCORE
48524 bool "/proc/kcore support" if !ARM
48525- depends on PROC_FS && MMU
48526+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48527
48528 config PROC_VMCORE
48529 bool "/proc/vmcore support"
48530- depends on PROC_FS && CRASH_DUMP
48531- default y
48532+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48533+ default n
48534 help
48535 Exports the dump image of crashed kernel in ELF format.
48536
48537@@ -59,8 +59,8 @@ config PROC_SYSCTL
48538 limited in memory.
48539
48540 config PROC_PAGE_MONITOR
48541- default y
48542- depends on PROC_FS && MMU
48543+ default n
48544+ depends on PROC_FS && MMU && !GRKERNSEC
48545 bool "Enable /proc page monitoring" if EXPERT
48546 help
48547 Various /proc files exist to monitor process memory utilization:
48548diff --git a/fs/proc/array.c b/fs/proc/array.c
5e856224 48549index c602b8d..a7de642 100644
fe2de317
MT
48550--- a/fs/proc/array.c
48551+++ b/fs/proc/array.c
6892158b
MT
48552@@ -60,6 +60,7 @@
48553 #include <linux/tty.h>
48554 #include <linux/string.h>
48555 #include <linux/mman.h>
48556+#include <linux/grsecurity.h>
48557 #include <linux/proc_fs.h>
48558 #include <linux/ioport.h>
48559 #include <linux/uaccess.h>
fe2de317 48560@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
16454cff 48561 seq_putc(m, '\n');
57199397
MT
48562 }
48563
48564+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48565+static inline void task_pax(struct seq_file *m, struct task_struct *p)
48566+{
48567+ if (p->mm)
48568+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48569+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48570+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48571+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48572+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48573+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48574+ else
48575+ seq_printf(m, "PaX:\t-----\n");
48576+}
48577+#endif
48578+
48579 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48580 struct pid *pid, struct task_struct *task)
48581 {
fe2de317 48582@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
c52201e0
MT
48583 task_cpus_allowed(m, task);
48584 cpuset_task_status_allowed(m, task);
57199397
MT
48585 task_context_switch_counts(m, task);
48586+
48587+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48588+ task_pax(m, task);
48589+#endif
6892158b
MT
48590+
48591+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48592+ task_grsec_rbac(m, task);
48593+#endif
57199397
MT
48594+
48595 return 0;
48596 }
48597
48598+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48599+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48600+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48601+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48602+#endif
48603+
48604 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48605 struct pid *pid, struct task_struct *task, int whole)
48606 {
4c928ab7 48607@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
6e9df6a3 48608 char tcomm[sizeof(task->comm)];
66a7e928
MT
48609 unsigned long flags;
48610
4c928ab7
MT
48611+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48612+ if (current->exec_id != m->exec_id) {
48613+ gr_log_badprocpid("stat");
48614+ return 0;
48615+ }
48616+#endif
66a7e928
MT
48617+
48618 state = *get_task_state(task);
48619 vsize = eip = esp = 0;
5e856224 48620 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
4c928ab7 48621@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
48622 gtime = task->gtime;
48623 }
48624
48625+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48626+ if (PAX_RAND_FLAGS(mm)) {
48627+ eip = 0;
48628+ esp = 0;
48629+ wchan = 0;
48630+ }
48631+#endif
48632+#ifdef CONFIG_GRKERNSEC_HIDESYM
48633+ wchan = 0;
48634+ eip =0;
48635+ esp =0;
48636+#endif
48637+
48638 /* scale priority and nice values from timeslices to -20..20 */
48639 /* to make it look like a "normal" Unix priority/nice value */
48640 priority = task_prio(task);
4c928ab7 48641@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
48642 vsize,
48643 mm ? get_mm_rss(mm) : 0,
48644 rsslim,
48645+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
ea610fa8
AF
48646+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48647+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
57199397
MT
48648+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48649+#else
ea610fa8
AF
48650 mm ? (permitted ? mm->start_code : 1) : 0,
48651 mm ? (permitted ? mm->end_code : 1) : 0,
57199397
MT
48652 (permitted && mm) ? mm->start_stack : 0,
48653+#endif
48654 esp,
48655 eip,
48656 /* The signal information here is obsolete.
5e856224 48657@@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
4c928ab7
MT
48658 struct pid *pid, struct task_struct *task)
48659 {
48660 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48661- struct mm_struct *mm = get_task_mm(task);
48662+ struct mm_struct *mm;
48663
48664+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48665+ if (current->exec_id != m->exec_id) {
48666+ gr_log_badprocpid("statm");
48667+ return 0;
48668+ }
48669+#endif
48670+ mm = get_task_mm(task);
48671 if (mm) {
48672 size = task_statm(mm, &shared, &text, &data, &resident);
48673 mmput(mm);
5e856224 48674@@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
48675
48676 return 0;
48677 }
48678+
48679+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48680+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48681+{
71d190be
MT
48682+ u32 curr_ip = 0;
48683+ unsigned long flags;
48684+
48685+ if (lock_task_sighand(task, &flags)) {
48686+ curr_ip = task->signal->curr_ip;
48687+ unlock_task_sighand(task, &flags);
48688+ }
48689+
48690+ return sprintf(buffer, "%pI4\n", &curr_ip);
57199397
MT
48691+}
48692+#endif
fe2de317 48693diff --git a/fs/proc/base.c b/fs/proc/base.c
5e856224 48694index d4548dd..d101f84 100644
fe2de317
MT
48695--- a/fs/proc/base.c
48696+++ b/fs/proc/base.c
5e856224 48697@@ -109,6 +109,14 @@ struct pid_entry {
57199397
MT
48698 union proc_op op;
48699 };
48700
48701+struct getdents_callback {
48702+ struct linux_dirent __user * current_dir;
48703+ struct linux_dirent __user * previous;
48704+ struct file * file;
48705+ int count;
48706+ int error;
48707+};
57199397
MT
48708+
48709 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48710 .name = (NAME), \
48711 .len = sizeof(NAME) - 1, \
5e856224 48712@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
57199397
MT
48713 if (!mm->arg_end)
48714 goto out_mm; /* Shh! No looking before we're done */
48715
48716+ if (gr_acl_handle_procpidmem(task))
48717+ goto out_mm;
48718+
48719 len = mm->arg_end - mm->arg_start;
48720
48721 if (len > PAGE_SIZE)
5e856224 48722@@ -240,12 +251,28 @@ out:
57199397
MT
48723 return res;
48724 }
48725
48726+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48727+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48728+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48729+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48730+#endif
48731+
48732 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48733 {
66a7e928
MT
48734 struct mm_struct *mm = mm_for_maps(task);
48735 int res = PTR_ERR(mm);
48736 if (mm && !IS_ERR(mm)) {
57199397
MT
48737 unsigned int nwords = 0;
48738+
48739+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
6892158b
MT
48740+ /* allow if we're currently ptracing this task */
48741+ if (PAX_RAND_FLAGS(mm) &&
48742+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
57199397 48743+ mmput(mm);
15a11c5b 48744+ return 0;
57199397
MT
48745+ }
48746+#endif
48747+
48748 do {
48749 nwords += 2;
48750 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
5e856224 48751@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
6892158b
MT
48752 }
48753
48754
48755-#ifdef CONFIG_KALLSYMS
48756+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48757 /*
48758 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48759 * Returns the resolved symbol. If that fails, simply return the address.
5e856224 48760@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
66a7e928 48761 mutex_unlock(&task->signal->cred_guard_mutex);
57199397 48762 }
57199397
MT
48763
48764-#ifdef CONFIG_STACKTRACE
48765+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48766
48767 #define MAX_STACK_TRACE_DEPTH 64
48768
5e856224 48769@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
57199397
MT
48770 return count;
48771 }
48772
48773-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48774+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48775 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48776 {
48777 long nr;
5e856224 48778@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
16454cff
MT
48779 /************************************************************************/
48780
48781 /* permission checks */
48782-static int proc_fd_access_allowed(struct inode *inode)
48783+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48784 {
48785 struct task_struct *task;
48786 int allowed = 0;
5e856224 48787@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
16454cff
MT
48788 */
48789 task = get_proc_task(inode);
48790 if (task) {
48791- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48792+ if (log)
16454cff 48793+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
5e856224
MT
48794+ else
48795+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
16454cff
MT
48796 put_task_struct(task);
48797 }
48798 return allowed;
5e856224
MT
48799@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
48800 struct task_struct *task,
48801 int hide_pid_min)
48802 {
48803+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48804+ return false;
48805+
48806+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48807+ rcu_read_lock();
48808+ {
48809+ const struct cred *tmpcred = current_cred();
48810+ const struct cred *cred = __task_cred(task);
48811+
48812+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48813+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48814+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48815+#endif
48816+ ) {
48817+ rcu_read_unlock();
48818+ return true;
48819+ }
48820+ }
48821+ rcu_read_unlock();
48822+
48823+ if (!pid->hide_pid)
48824+ return false;
48825+#endif
48826+
48827 if (pid->hide_pid < hide_pid_min)
48828 return true;
48829 if (in_group_p(pid->pid_gid))
48830 return true;
48831+
48832 return ptrace_may_access(task, PTRACE_MODE_READ);
48833 }
48834
48835@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48836 put_task_struct(task);
48837
48838 if (!has_perms) {
48839+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48840+ {
48841+#else
48842 if (pid->hide_pid == 2) {
48843+#endif
48844 /*
48845 * Let's make getdents(), stat(), and open()
48846 * consistent with each other. If a process
48847@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
4c928ab7
MT
48848 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48849 file->private_data = mm;
48850
48851+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48852+ file->f_version = current->exec_id;
48853+#endif
48854+
48855 return 0;
48856 }
48857
5e856224 48858@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
4c928ab7
MT
48859 ssize_t copied;
48860 char *page;
48861
48862+#ifdef CONFIG_GRKERNSEC
48863+ if (write)
48864+ return -EPERM;
48865+#endif
48866+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48867+ if (file->f_version != current->exec_id) {
48868+ gr_log_badprocpid("mem");
48869+ return 0;
48870+ }
48871+#endif
48872+
48873 if (!mm)
48874 return 0;
48875
5e856224 48876@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57199397
MT
48877 if (!task)
48878 goto out_no_task;
48879
48880+ if (gr_acl_handle_procpidmem(task))
48881+ goto out;
48882+
66a7e928
MT
48883 ret = -ENOMEM;
48884 page = (char *)__get_free_page(GFP_TEMPORARY);
48885 if (!page)
5e856224 48886@@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
16454cff
MT
48887 path_put(&nd->path);
48888
48889 /* Are we allowed to snoop on the tasks file descriptors? */
48890- if (!proc_fd_access_allowed(inode))
5e856224 48891+ if (!proc_fd_access_allowed(inode, 0))
16454cff
MT
48892 goto out;
48893
5e856224
MT
48894 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48895@@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
16454cff
MT
48896 struct path path;
48897
48898 /* Are we allowed to snoop on the tasks file descriptors? */
48899- if (!proc_fd_access_allowed(inode))
48900- goto out;
48901+ /* logging this is needed for learning on chromium to work properly,
48902+ but we don't want to flood the logs from 'ps' which does a readlink
48903+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48904+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48905+ */
48906+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48907+ if (!proc_fd_access_allowed(inode,0))
48908+ goto out;
48909+ } else {
48910+ if (!proc_fd_access_allowed(inode,1))
48911+ goto out;
48912+ }
48913
5e856224 48914 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
16454cff 48915 if (error)
5e856224 48916@@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
57199397
MT
48917 rcu_read_lock();
48918 cred = __task_cred(task);
48919 inode->i_uid = cred->euid;
48920+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48921+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48922+#else
48923 inode->i_gid = cred->egid;
48924+#endif
48925 rcu_read_unlock();
48926 }
48927 security_task_to_inode(task, inode);
5e856224
MT
48928@@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48929 return -ENOENT;
48930 }
57199397
MT
48931 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48932+#ifdef CONFIG_GRKERNSEC_PROC_USER
48933+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48934+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48935+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48936+#endif
48937 task_dumpable(task)) {
5e856224 48938 cred = __task_cred(task);
57199397
MT
48939 stat->uid = cred->euid;
48940+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48941+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48942+#else
48943 stat->gid = cred->egid;
48944+#endif
48945 }
48946 }
48947 rcu_read_unlock();
5e856224 48948@@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
57199397
MT
48949
48950 if (task) {
48951 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48952+#ifdef CONFIG_GRKERNSEC_PROC_USER
48953+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48954+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48955+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48956+#endif
48957 task_dumpable(task)) {
48958 rcu_read_lock();
48959 cred = __task_cred(task);
48960 inode->i_uid = cred->euid;
48961+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48962+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48963+#else
48964 inode->i_gid = cred->egid;
48965+#endif
48966 rcu_read_unlock();
48967 } else {
48968 inode->i_uid = 0;
5e856224 48969@@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
57199397
MT
48970 int fd = proc_fd(inode);
48971
48972 if (task) {
48973- files = get_files_struct(task);
48974+ if (!gr_acl_handle_procpidmem(task))
48975+ files = get_files_struct(task);
48976 put_task_struct(task);
48977 }
48978 if (files) {
5e856224 48979@@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
16454cff 48980 */
6e9df6a3 48981 static int proc_fd_permission(struct inode *inode, int mask)
57199397 48982 {
57199397 48983+ struct task_struct *task;
6e9df6a3 48984 int rv = generic_permission(inode, mask);
57199397
MT
48985- if (rv == 0)
48986- return 0;
48987+
48988 if (task_pid(current) == proc_pid(inode))
48989 rv = 0;
48990+
48991+ task = get_proc_task(inode);
48992+ if (task == NULL)
48993+ return rv;
48994+
48995+ if (gr_acl_handle_procpidmem(task))
48996+ rv = -EACCES;
48997+
48998+ put_task_struct(task);
48999+
49000 return rv;
49001 }
49002
5e856224 49003@@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
57199397
MT
49004 if (!task)
49005 goto out_no_task;
49006
49007+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49008+ goto out;
49009+
49010 /*
49011 * Yes, it does not scale. And it should not. Don't add
49012 * new entries into /proc/<tgid>/ without very good reasons.
5e856224 49013@@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
57199397
MT
49014 if (!task)
49015 goto out_no_task;
49016
49017+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49018+ goto out;
49019+
49020 ret = 0;
49021 i = filp->f_pos;
49022 switch (i) {
5e856224 49023@@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
57199397
MT
49024 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
49025 void *cookie)
49026 {
49027- char *s = nd_get_link(nd);
49028+ const char *s = nd_get_link(nd);
49029 if (!IS_ERR(s))
49030 __putname(s);
49031 }
5e856224 49032@@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
16454cff 49033 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
57199397
MT
49034 #endif
49035 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49036-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49037+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 49038 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
49039 #endif
49040 INF("cmdline", S_IRUGO, proc_pid_cmdline),
5e856224 49041@@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
6892158b
MT
49042 #ifdef CONFIG_SECURITY
49043 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49044 #endif
49045-#ifdef CONFIG_KALLSYMS
49046+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
49047 INF("wchan", S_IRUGO, proc_pid_wchan),
49048 #endif
49049-#ifdef CONFIG_STACKTRACE
49050+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 49051 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
49052 #endif
49053 #ifdef CONFIG_SCHEDSTATS
5e856224 49054@@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
15a11c5b
MT
49055 #ifdef CONFIG_HARDWALL
49056 INF("hardwall", S_IRUGO, proc_pid_hardwall),
57199397
MT
49057 #endif
49058+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49059+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
49060+#endif
49061 };
49062
49063 static int proc_tgid_base_readdir(struct file * filp,
5e856224 49064@@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
57199397
MT
49065 if (!inode)
49066 goto out;
49067
49068+#ifdef CONFIG_GRKERNSEC_PROC_USER
49069+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
49070+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49071+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49072+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
49073+#else
49074 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
49075+#endif
49076 inode->i_op = &proc_tgid_base_inode_operations;
49077 inode->i_fop = &proc_tgid_base_operations;
49078 inode->i_flags|=S_IMMUTABLE;
5e856224 49079@@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
57199397
MT
49080 if (!task)
49081 goto out;
49082
49083+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49084+ goto out_put_task;
49085+
49086 result = proc_pid_instantiate(dir, dentry, task, NULL);
49087+out_put_task:
49088 put_task_struct(task);
49089 out:
49090 return result;
5e856224
MT
49091@@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
49092 static int fake_filldir(void *buf, const char *name, int namelen,
49093 loff_t offset, u64 ino, unsigned d_type)
57199397 49094 {
5e856224
MT
49095+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
49096+ __buf->error = -EINVAL;
49097 return 0;
49098 }
57199397 49099
5e856224 49100@@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
57199397
MT
49101 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49102 #endif
49103 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49104-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49105+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 49106 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
49107 #endif
49108 INF("cmdline", S_IRUGO, proc_pid_cmdline),
5e856224 49109@@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
6892158b
MT
49110 #ifdef CONFIG_SECURITY
49111 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49112 #endif
49113-#ifdef CONFIG_KALLSYMS
49114+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
49115 INF("wchan", S_IRUGO, proc_pid_wchan),
49116 #endif
49117-#ifdef CONFIG_STACKTRACE
49118+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 49119 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
49120 #endif
49121 #ifdef CONFIG_SCHEDSTATS
fe2de317
MT
49122diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49123index 82676e3..5f8518a 100644
49124--- a/fs/proc/cmdline.c
49125+++ b/fs/proc/cmdline.c
49126@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
57199397
MT
49127
49128 static int __init proc_cmdline_init(void)
49129 {
49130+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49131+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49132+#else
49133 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49134+#endif
49135 return 0;
49136 }
49137 module_init(proc_cmdline_init);
fe2de317
MT
49138diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49139index b143471..bb105e5 100644
49140--- a/fs/proc/devices.c
49141+++ b/fs/proc/devices.c
49142@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
57199397
MT
49143
49144 static int __init proc_devices_init(void)
49145 {
49146+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49147+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49148+#else
49149 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49150+#endif
49151 return 0;
49152 }
49153 module_init(proc_devices_init);
fe2de317 49154diff --git a/fs/proc/inode.c b/fs/proc/inode.c
5e856224 49155index 84fd323..f698a32 100644
fe2de317
MT
49156--- a/fs/proc/inode.c
49157+++ b/fs/proc/inode.c
5e856224
MT
49158@@ -21,12 +21,18 @@
49159 #include <linux/seq_file.h>
6e9df6a3 49160 #include <linux/slab.h>
5e856224 49161 #include <linux/mount.h>
6e9df6a3
MT
49162+#include <linux/grsecurity.h>
49163
49164 #include <asm/system.h>
49165 #include <asm/uaccess.h>
49166
49167 #include "internal.h"
49168
49169+#ifdef CONFIG_PROC_SYSCTL
49170+extern const struct inode_operations proc_sys_inode_operations;
49171+extern const struct inode_operations proc_sys_dir_operations;
49172+#endif
49173+
49174 static void proc_evict_inode(struct inode *inode)
49175 {
49176 struct proc_dir_entry *de;
5e856224 49177@@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
6e9df6a3
MT
49178 ns_ops = PROC_I(inode)->ns_ops;
49179 if (ns_ops && ns_ops->put)
49180 ns_ops->put(PROC_I(inode)->ns);
49181+
49182+#ifdef CONFIG_PROC_SYSCTL
49183+ if (inode->i_op == &proc_sys_inode_operations ||
49184+ inode->i_op == &proc_sys_dir_operations)
49185+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49186+#endif
49187+
49188 }
49189
49190 static struct kmem_cache * proc_inode_cachep;
5e856224 49191@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
57199397
MT
49192 if (de->mode) {
49193 inode->i_mode = de->mode;
49194 inode->i_uid = de->uid;
49195+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49196+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49197+#else
49198 inode->i_gid = de->gid;
49199+#endif
49200 }
49201 if (de->size)
49202 inode->i_size = de->size;
fe2de317 49203diff --git a/fs/proc/internal.h b/fs/proc/internal.h
5e856224 49204index 2925775..4f08fae 100644
fe2de317
MT
49205--- a/fs/proc/internal.h
49206+++ b/fs/proc/internal.h
49207@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
49208 struct pid *pid, struct task_struct *task);
49209 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49210 struct pid *pid, struct task_struct *task);
49211+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49212+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49213+#endif
49214 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49215
49216 extern const struct file_operations proc_maps_operations;
fe2de317 49217diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
4c928ab7 49218index d245cb2..f4e8498 100644
fe2de317
MT
49219--- a/fs/proc/kcore.c
49220+++ b/fs/proc/kcore.c
4c928ab7 49221@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397
MT
49222 * the addresses in the elf_phdr on our list.
49223 */
49224 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49225- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49226+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49227+ if (tsz > buflen)
49228 tsz = buflen;
49229-
58c5fc13 49230+
57199397
MT
49231 while (buflen) {
49232 struct kcore_list *m;
58c5fc13 49233
4c928ab7 49234@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397 49235 kfree(elf_buf);
58c5fc13 49236 } else {
57199397
MT
49237 if (kern_addr_valid(start)) {
49238- unsigned long n;
49239+ char *elf_buf;
bc901d79 49240+ mm_segment_t oldfs;
57199397
MT
49241
49242- n = copy_to_user(buffer, (char *)start, tsz);
49243- /*
49244- * We cannot distingush between fault on source
49245- * and fault on destination. When this happens
49246- * we clear too and hope it will trigger the
49247- * EFAULT again.
49248- */
49249- if (n) {
49250- if (clear_user(buffer + tsz - n,
49251- n))
49252+ elf_buf = kmalloc(tsz, GFP_KERNEL);
49253+ if (!elf_buf)
49254+ return -ENOMEM;
bc901d79
MT
49255+ oldfs = get_fs();
49256+ set_fs(KERNEL_DS);
57199397 49257+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
bc901d79 49258+ set_fs(oldfs);
57199397
MT
49259+ if (copy_to_user(buffer, elf_buf, tsz)) {
49260+ kfree(elf_buf);
49261 return -EFAULT;
49262+ }
49263 }
bc901d79 49264+ set_fs(oldfs);
57199397
MT
49265+ kfree(elf_buf);
49266 } else {
49267 if (clear_user(buffer, tsz))
49268 return -EFAULT;
4c928ab7 49269@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58c5fc13 49270
ae4e228f 49271 static int open_kcore(struct inode *inode, struct file *filp)
58c5fc13 49272 {
ae4e228f
MT
49273+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49274+ return -EPERM;
58c5fc13 49275+#endif
ae4e228f
MT
49276 if (!capable(CAP_SYS_RAWIO))
49277 return -EPERM;
49278 if (kcore_need_update)
fe2de317 49279diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
4c928ab7 49280index 80e4645..53e5fcf 100644
fe2de317
MT
49281--- a/fs/proc/meminfo.c
49282+++ b/fs/proc/meminfo.c
4c928ab7 49283@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
ae4e228f
MT
49284 vmi.used >> 10,
49285 vmi.largest_chunk >> 10
49286 #ifdef CONFIG_MEMORY_FAILURE
49287- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49288+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49289 #endif
16454cff
MT
49290 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49291 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
fe2de317
MT
49292diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49293index b1822dd..df622cb 100644
49294--- a/fs/proc/nommu.c
49295+++ b/fs/proc/nommu.c
49296@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
58c5fc13
MT
49297 if (len < 1)
49298 len = 1;
49299 seq_printf(m, "%*c", len, ' ');
49300- seq_path(m, &file->f_path, "");
49301+ seq_path(m, &file->f_path, "\n\\");
49302 }
49303
49304 seq_putc(m, '\n');
fe2de317 49305diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
5e856224 49306index 06e1cc1..177cd98 100644
fe2de317
MT
49307--- a/fs/proc/proc_net.c
49308+++ b/fs/proc/proc_net.c
49309@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
58c5fc13
MT
49310 struct task_struct *task;
49311 struct nsproxy *ns;
49312 struct net *net = NULL;
49313+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49314+ const struct cred *cred = current_cred();
49315+#endif
49316+
49317+#ifdef CONFIG_GRKERNSEC_PROC_USER
49318+ if (cred->fsuid)
49319+ return net;
49320+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49321+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49322+ return net;
49323+#endif
49324
49325 rcu_read_lock();
49326 task = pid_task(proc_pid(dir), PIDTYPE_PID);
fe2de317 49327diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
5e856224 49328index 53c3bce..10ad159 100644
fe2de317
MT
49329--- a/fs/proc/proc_sysctl.c
49330+++ b/fs/proc/proc_sysctl.c
4c928ab7 49331@@ -9,11 +9,13 @@
16454cff 49332 #include <linux/namei.h>
58c5fc13
MT
49333 #include "internal.h"
49334
49335+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
49336+
49337 static const struct dentry_operations proc_sys_dentry_operations;
49338 static const struct file_operations proc_sys_file_operations;
6e9df6a3
MT
49339-static const struct inode_operations proc_sys_inode_operations;
49340+const struct inode_operations proc_sys_inode_operations;
49341 static const struct file_operations proc_sys_dir_file_operations;
49342-static const struct inode_operations proc_sys_dir_operations;
49343+const struct inode_operations proc_sys_dir_operations;
49344
4c928ab7
MT
49345 void proc_sys_poll_notify(struct ctl_table_poll *poll)
49346 {
49347@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
6e9df6a3
MT
49348
49349 err = NULL;
49350 d_set_d_op(dentry, &proc_sys_dentry_operations);
49351+
49352+ gr_handle_proc_create(dentry, inode);
49353+
49354 d_add(dentry, inode);
58c5fc13
MT
49355
49356+ if (gr_handle_sysctl(p, MAY_EXEC))
6e9df6a3
MT
49357+ err = ERR_PTR(-ENOENT);
49358+
49359 out:
49360 sysctl_head_finish(head);
49361 return err;
4c928ab7
MT
49362@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49363 if (!table->proc_handler)
49364 goto out;
49365
49366+#ifdef CONFIG_GRKERNSEC
49367+ error = -EPERM;
49368+ if (write && !capable(CAP_SYS_ADMIN))
49369+ goto out;
49370+#endif
49371+
49372 /* careful: calling conventions are nasty here */
49373 res = count;
49374 error = table->proc_handler(table, write, buf, &res, ppos);
5e856224 49375@@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
6e9df6a3
MT
49376 return -ENOMEM;
49377 } else {
49378 d_set_d_op(child, &proc_sys_dentry_operations);
49379+
49380+ gr_handle_proc_create(child, inode);
58c5fc13 49381+
6e9df6a3
MT
49382 d_add(child, inode);
49383 }
49384 } else {
5e856224 49385@@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
58c5fc13
MT
49386 if (*pos < file->f_pos)
49387 continue;
49388
49389+ if (gr_handle_sysctl(table, 0))
49390+ continue;
49391+
49392 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
49393 if (res)
49394 return res;
5e856224 49395@@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
58c5fc13
MT
49396 if (IS_ERR(head))
49397 return PTR_ERR(head);
49398
49399+ if (table && gr_handle_sysctl(table, MAY_EXEC))
49400+ return -ENOENT;
49401+
49402 generic_fillattr(inode, stat);
49403 if (table)
49404 stat->mode = (stat->mode & S_IFMT) | table->mode;
5e856224 49405@@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
6e9df6a3
MT
49406 .llseek = generic_file_llseek,
49407 };
49408
49409-static const struct inode_operations proc_sys_inode_operations = {
49410+const struct inode_operations proc_sys_inode_operations = {
49411 .permission = proc_sys_permission,
49412 .setattr = proc_sys_setattr,
49413 .getattr = proc_sys_getattr,
49414 };
49415
49416-static const struct inode_operations proc_sys_dir_operations = {
49417+const struct inode_operations proc_sys_dir_operations = {
49418 .lookup = proc_sys_lookup,
49419 .permission = proc_sys_permission,
49420 .setattr = proc_sys_setattr,
fe2de317 49421diff --git a/fs/proc/root.c b/fs/proc/root.c
5e856224 49422index 46a15d8..335631a 100644
fe2de317
MT
49423--- a/fs/proc/root.c
49424+++ b/fs/proc/root.c
5e856224 49425@@ -187,7 +187,15 @@ void __init proc_root_init(void)
58c5fc13
MT
49426 #ifdef CONFIG_PROC_DEVICETREE
49427 proc_device_tree_init();
49428 #endif
49429+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49430+#ifdef CONFIG_GRKERNSEC_PROC_USER
49431+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49432+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49433+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49434+#endif
49435+#else
49436 proc_mkdir("bus", NULL);
49437+#endif
49438 proc_sys_init();
49439 }
49440
fe2de317 49441diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5e856224 49442index 3efa725..23c925b 100644
fe2de317
MT
49443--- a/fs/proc/task_mmu.c
49444+++ b/fs/proc/task_mmu.c
4c928ab7
MT
49445@@ -11,6 +11,7 @@
49446 #include <linux/rmap.h>
49447 #include <linux/swap.h>
49448 #include <linux/swapops.h>
49449+#include <linux/grsecurity.h>
49450
49451 #include <asm/elf.h>
49452 #include <asm/uaccess.h>
49453@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
49454 "VmExe:\t%8lu kB\n"
49455 "VmLib:\t%8lu kB\n"
df50ba0c
MT
49456 "VmPTE:\t%8lu kB\n"
49457- "VmSwap:\t%8lu kB\n",
58c5fc13 49458- hiwater_vm << (PAGE_SHIFT-10),
df50ba0c 49459+ "VmSwap:\t%8lu kB\n"
58c5fc13
MT
49460+
49461+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49462+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49463+#endif
49464+
49465+ ,hiwater_vm << (PAGE_SHIFT-10),
49466 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49467 mm->locked_vm << (PAGE_SHIFT-10),
4c928ab7
MT
49468 mm->pinned_vm << (PAGE_SHIFT-10),
49469@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
49470 data << (PAGE_SHIFT-10),
49471 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
df50ba0c
MT
49472 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49473- swap << (PAGE_SHIFT-10));
49474+ swap << (PAGE_SHIFT-10)
58c5fc13
MT
49475+
49476+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49477+ , mm->context.user_cs_base, mm->context.user_cs_limit
49478+#endif
49479+
49480+ );
49481 }
49482
49483 unsigned long task_vsize(struct mm_struct *mm)
4c928ab7 49484@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
58c5fc13
MT
49485 return ret;
49486 }
49487
49488+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49489+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49490+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
49491+ _mm->pax_flags & MF_PAX_SEGMEXEC))
49492+#endif
49493+
49494 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49495 {
49496 struct mm_struct *mm = vma->vm_mm;
4c928ab7 49497@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
57199397 49498 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
58c5fc13
MT
49499 }
49500
57199397 49501- /* We don't show the stack guard page in /proc/maps */
58c5fc13 49502+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66a7e928
MT
49503+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49504+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
58c5fc13 49505+#else
66a7e928
MT
49506 start = vma->vm_start;
49507- if (stack_guard_page_start(vma, start))
49508- start += PAGE_SIZE;
49509 end = vma->vm_end;
49510- if (stack_guard_page_end(vma, end))
49511- end -= PAGE_SIZE;
58c5fc13 49512+#endif
66a7e928
MT
49513
49514 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49515 start,
4c928ab7 49516@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
58c5fc13
MT
49517 flags & VM_WRITE ? 'w' : '-',
49518 flags & VM_EXEC ? 'x' : '-',
49519 flags & VM_MAYSHARE ? 's' : 'p',
49520+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49521+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49522+#else
49523 pgoff,
49524+#endif
49525 MAJOR(dev), MINOR(dev), ino, &len);
49526
49527 /*
4c928ab7 49528@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
58c5fc13
MT
49529 */
49530 if (file) {
49531 pad_len_spaces(m, len);
49532- seq_path(m, &file->f_path, "\n");
49533+ seq_path(m, &file->f_path, "\n\\");
49534 } else {
49535 const char *name = arch_vma_name(vma);
49536 if (!name) {
4c928ab7 49537@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
66a7e928
MT
49538 if (vma->vm_start <= mm->brk &&
49539 vma->vm_end >= mm->start_brk) {
58c5fc13
MT
49540 name = "[heap]";
49541- } else if (vma->vm_start <= mm->start_stack &&
49542- vma->vm_end >= mm->start_stack) {
49543+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49544+ (vma->vm_start <= mm->start_stack &&
49545+ vma->vm_end >= mm->start_stack)) {
49546 name = "[stack]";
df50ba0c
MT
49547 }
49548 } else {
4c928ab7
MT
49549@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
49550 struct proc_maps_private *priv = m->private;
49551 struct task_struct *task = priv->task;
49552
49553+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49554+ if (current->exec_id != m->exec_id) {
49555+ gr_log_badprocpid("maps");
49556+ return 0;
49557+ }
49558+#endif
49559+
49560 show_map_vma(m, vma);
49561
49562 if (m->count < m->size) /* vma is copied successfully */
5e856224 49563@@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
4c928ab7 49564 .private = &mss,
58c5fc13
MT
49565 };
49566
4c928ab7
MT
49567+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49568+ if (current->exec_id != m->exec_id) {
49569+ gr_log_badprocpid("smaps");
49570+ return 0;
49571+ }
49572+#endif
58c5fc13
MT
49573 memset(&mss, 0, sizeof mss);
49574- mss.vma = vma;
df50ba0c 49575- /* mmap_sem is held in m_start */
58c5fc13
MT
49576- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49577- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
df50ba0c 49578-
58c5fc13
MT
49579+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49580+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49581+#endif
49582+ mss.vma = vma;
df50ba0c 49583+ /* mmap_sem is held in m_start */
58c5fc13
MT
49584+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49585+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49586+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49587+ }
49588+#endif
58c5fc13
MT
49589 show_map_vma(m, vma);
49590
df50ba0c 49591 seq_printf(m,
5e856224 49592@@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
58c5fc13 49593 "KernelPageSize: %8lu kB\n"
16454cff
MT
49594 "MMUPageSize: %8lu kB\n"
49595 "Locked: %8lu kB\n",
58c5fc13
MT
49596+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49597+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49598+#else
49599 (vma->vm_end - vma->vm_start) >> 10,
49600+#endif
49601 mss.resident >> 10,
49602 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49603 mss.shared_clean >> 10,
5e856224 49604@@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
4c928ab7
MT
49605 int n;
49606 char buffer[50];
49607
49608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49609+ if (current->exec_id != m->exec_id) {
49610+ gr_log_badprocpid("numa_maps");
49611+ return 0;
49612+ }
49613+#endif
49614+
49615 if (!mm)
49616 return 0;
49617
5e856224 49618@@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
4c928ab7
MT
49619 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49620 mpol_cond_put(pol);
49621
49622+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49623+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49624+#else
49625 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49626+#endif
15a11c5b
MT
49627
49628 if (file) {
49629 seq_printf(m, " file=");
49630- seq_path(m, &file->f_path, "\n\t= ");
49631+ seq_path(m, &file->f_path, "\n\t\\= ");
49632 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49633 seq_printf(m, " heap");
49634 } else if (vma->vm_start <= mm->start_stack &&
fe2de317
MT
49635diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49636index 980de54..2a4db5f 100644
49637--- a/fs/proc/task_nommu.c
49638+++ b/fs/proc/task_nommu.c
49639@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
49640 else
49641 bytes += kobjsize(mm);
49642
49643- if (current->fs && current->fs->users > 1)
49644+ if (current->fs && atomic_read(&current->fs->users) > 1)
49645 sbytes += kobjsize(current->fs);
49646 else
49647 bytes += kobjsize(current->fs);
fe2de317 49648@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
57199397
MT
49649
49650 if (file) {
49651 pad_len_spaces(m, len);
58c5fc13
MT
49652- seq_path(m, &file->f_path, "");
49653+ seq_path(m, &file->f_path, "\n\\");
57199397
MT
49654 } else if (mm) {
49655 if (vma->vm_start <= mm->start_stack &&
49656 vma->vm_end >= mm->start_stack) {
fe2de317
MT
49657diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49658index d67908b..d13f6a6 100644
49659--- a/fs/quota/netlink.c
49660+++ b/fs/quota/netlink.c
49661@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
8308f9c9
MT
49662 void quota_send_warning(short type, unsigned int id, dev_t dev,
49663 const char warntype)
49664 {
49665- static atomic_t seq;
49666+ static atomic_unchecked_t seq;
49667 struct sk_buff *skb;
49668 void *msg_head;
49669 int ret;
fe2de317 49670@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
8308f9c9
MT
49671 "VFS: Not enough memory to send quota warning.\n");
49672 return;
49673 }
49674- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49675+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49676 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49677 if (!msg_head) {
49678 printk(KERN_ERR
fe2de317
MT
49679diff --git a/fs/readdir.c b/fs/readdir.c
49680index 356f715..c918d38 100644
49681--- a/fs/readdir.c
49682+++ b/fs/readdir.c
6892158b 49683@@ -17,6 +17,7 @@
58c5fc13
MT
49684 #include <linux/security.h>
49685 #include <linux/syscalls.h>
49686 #include <linux/unistd.h>
49687+#include <linux/namei.h>
49688
49689 #include <asm/uaccess.h>
49690
49691@@ -67,6 +68,7 @@ struct old_linux_dirent {
49692
49693 struct readdir_callback {
49694 struct old_linux_dirent __user * dirent;
49695+ struct file * file;
49696 int result;
49697 };
49698
fe2de317 49699@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
58c5fc13
MT
49700 buf->result = -EOVERFLOW;
49701 return -EOVERFLOW;
49702 }
49703+
49704+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49705+ return 0;
49706+
49707 buf->result++;
49708 dirent = buf->dirent;
49709 if (!access_ok(VERIFY_WRITE, dirent,
fe2de317 49710@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
58c5fc13
MT
49711
49712 buf.result = 0;
49713 buf.dirent = dirent;
49714+ buf.file = file;
49715
49716 error = vfs_readdir(file, fillonedir, &buf);
49717 if (buf.result)
49718@@ -142,6 +149,7 @@ struct linux_dirent {
49719 struct getdents_callback {
49720 struct linux_dirent __user * current_dir;
49721 struct linux_dirent __user * previous;
49722+ struct file * file;
49723 int count;
49724 int error;
49725 };
fe2de317 49726@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
49727 buf->error = -EOVERFLOW;
49728 return -EOVERFLOW;
49729 }
49730+
49731+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49732+ return 0;
49733+
49734 dirent = buf->previous;
49735 if (dirent) {
49736 if (__put_user(offset, &dirent->d_off))
fe2de317 49737@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58c5fc13
MT
49738 buf.previous = NULL;
49739 buf.count = count;
49740 buf.error = 0;
49741+ buf.file = file;
49742
49743 error = vfs_readdir(file, filldir, &buf);
49744 if (error >= 0)
6892158b 49745@@ -229,6 +242,7 @@ out:
58c5fc13
MT
49746 struct getdents_callback64 {
49747 struct linux_dirent64 __user * current_dir;
49748 struct linux_dirent64 __user * previous;
49749+ struct file *file;
49750 int count;
49751 int error;
49752 };
fe2de317 49753@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
49754 buf->error = -EINVAL; /* only used if we fail.. */
49755 if (reclen > buf->count)
49756 return -EINVAL;
49757+
49758+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49759+ return 0;
49760+
49761 dirent = buf->previous;
49762 if (dirent) {
49763 if (__put_user(offset, &dirent->d_off))
fe2de317 49764@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58c5fc13
MT
49765
49766 buf.current_dir = dirent;
49767 buf.previous = NULL;
49768+ buf.file = file;
49769 buf.count = count;
49770 buf.error = 0;
49771
fe2de317 49772@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
6e9df6a3
MT
49773 error = buf.error;
49774 lastdirent = buf.previous;
49775 if (lastdirent) {
49776- typeof(lastdirent->d_off) d_off = file->f_pos;
49777+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49778 if (__put_user(d_off, &lastdirent->d_off))
49779 error = -EFAULT;
49780 else
fe2de317
MT
49781diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49782index 60c0804..d814f98 100644
49783--- a/fs/reiserfs/do_balan.c
49784+++ b/fs/reiserfs/do_balan.c
49785@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
58c5fc13
MT
49786 return;
49787 }
49788
49789- atomic_inc(&(fs_generation(tb->tb_sb)));
49790+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49791 do_balance_starts(tb);
49792
49793 /* balance leaf returns 0 except if combining L R and S into
fe2de317 49794diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
4c928ab7 49795index 7a99811..a7c96c4 100644
fe2de317
MT
49796--- a/fs/reiserfs/procfs.c
49797+++ b/fs/reiserfs/procfs.c
49798@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
58c5fc13
MT
49799 "SMALL_TAILS " : "NO_TAILS ",
49800 replay_only(sb) ? "REPLAY_ONLY " : "",
49801 convert_reiserfs(sb) ? "CONV " : "",
49802- atomic_read(&r->s_generation_counter),
49803+ atomic_read_unchecked(&r->s_generation_counter),
49804 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49805 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49806 SF(s_good_search_by_key_reada), SF(s_bmaps),
fe2de317 49807diff --git a/fs/select.c b/fs/select.c
5e856224 49808index e782258..3b4b44c 100644
fe2de317
MT
49809--- a/fs/select.c
49810+++ b/fs/select.c
ae4e228f 49811@@ -20,6 +20,7 @@
58c5fc13
MT
49812 #include <linux/module.h>
49813 #include <linux/slab.h>
49814 #include <linux/poll.h>
49815+#include <linux/security.h>
49816 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49817 #include <linux/file.h>
49818 #include <linux/fdtable.h>
4c928ab7 49819@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
58c5fc13
MT
49820 struct poll_list *walk = head;
49821 unsigned long todo = nfds;
49822
49823+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
df50ba0c 49824 if (nfds > rlimit(RLIMIT_NOFILE))
58c5fc13
MT
49825 return -EINVAL;
49826
fe2de317 49827diff --git a/fs/seq_file.c b/fs/seq_file.c
5e856224 49828index 4023d6b..ab46c6a 100644
fe2de317
MT
49829--- a/fs/seq_file.c
49830+++ b/fs/seq_file.c
4c928ab7
MT
49831@@ -9,6 +9,7 @@
49832 #include <linux/module.h>
49833 #include <linux/seq_file.h>
49834 #include <linux/slab.h>
49835+#include <linux/sched.h>
49836
49837 #include <asm/uaccess.h>
49838 #include <asm/page.h>
49839@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49840 memset(p, 0, sizeof(*p));
49841 mutex_init(&p->lock);
49842 p->op = op;
49843+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49844+ p->exec_id = current->exec_id;
49845+#endif
49846
49847 /*
49848 * Wrappers around seq_open(e.g. swaps_open) need to be
fe2de317 49849@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
15a11c5b
MT
49850 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49851 void *data)
49852 {
49853- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49854+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49855 int res = -ENOMEM;
49856
49857 if (op) {
fe2de317 49858diff --git a/fs/splice.c b/fs/splice.c
5e856224 49859index 96d7b28..fd465ac 100644
fe2de317
MT
49860--- a/fs/splice.c
49861+++ b/fs/splice.c
49862@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49863 pipe_lock(pipe);
49864
49865 for (;;) {
49866- if (!pipe->readers) {
49867+ if (!atomic_read(&pipe->readers)) {
49868 send_sig(SIGPIPE, current, 0);
49869 if (!ret)
49870 ret = -EPIPE;
fe2de317 49871@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49872 do_wakeup = 0;
49873 }
49874
49875- pipe->waiting_writers++;
49876+ atomic_inc(&pipe->waiting_writers);
49877 pipe_wait(pipe);
49878- pipe->waiting_writers--;
49879+ atomic_dec(&pipe->waiting_writers);
49880 }
49881
49882 pipe_unlock(pipe);
4c928ab7 49883@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
ae4e228f
MT
49884 old_fs = get_fs();
49885 set_fs(get_ds());
49886 /* The cast to a user pointer is valid due to the set_fs() */
49887- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
6e9df6a3 49888+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
ae4e228f
MT
49889 set_fs(old_fs);
49890
49891 return res;
4c928ab7 49892@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
ae4e228f
MT
49893 old_fs = get_fs();
49894 set_fs(get_ds());
49895 /* The cast to a user pointer is valid due to the set_fs() */
49896- res = vfs_write(file, (const char __user *)buf, count, &pos);
6e9df6a3 49897+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
ae4e228f
MT
49898 set_fs(old_fs);
49899
49900 return res;
4c928ab7 49901@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
ae4e228f
MT
49902 goto err;
49903
49904 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49905- vec[i].iov_base = (void __user *) page_address(page);
6e9df6a3 49906+ vec[i].iov_base = (void __force_user *) page_address(page);
ae4e228f 49907 vec[i].iov_len = this_len;
57199397 49908 spd.pages[i] = page;
ae4e228f 49909 spd.nr_pages++;
5e856224 49910@@ -848,10 +848,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
ae4e228f
MT
49911 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49912 {
49913 while (!pipe->nrbufs) {
49914- if (!pipe->writers)
49915+ if (!atomic_read(&pipe->writers))
49916 return 0;
49917
49918- if (!pipe->waiting_writers && sd->num_spliced)
49919+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49920 return 0;
49921
49922 if (sd->flags & SPLICE_F_NONBLOCK)
5e856224 49923@@ -1184,7 +1184,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
ae4e228f
MT
49924 * out of the pipe right after the splice_to_pipe(). So set
49925 * PIPE_READERS appropriately.
49926 */
49927- pipe->readers = 1;
49928+ atomic_set(&pipe->readers, 1);
49929
49930 current->splice_pipe = pipe;
49931 }
5e856224 49932@@ -1736,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49933 ret = -ERESTARTSYS;
49934 break;
49935 }
49936- if (!pipe->writers)
49937+ if (!atomic_read(&pipe->writers))
49938 break;
49939- if (!pipe->waiting_writers) {
49940+ if (!atomic_read(&pipe->waiting_writers)) {
49941 if (flags & SPLICE_F_NONBLOCK) {
49942 ret = -EAGAIN;
49943 break;
5e856224 49944@@ -1770,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49945 pipe_lock(pipe);
49946
57199397 49947 while (pipe->nrbufs >= pipe->buffers) {
ae4e228f
MT
49948- if (!pipe->readers) {
49949+ if (!atomic_read(&pipe->readers)) {
49950 send_sig(SIGPIPE, current, 0);
49951 ret = -EPIPE;
49952 break;
5e856224 49953@@ -1783,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49954 ret = -ERESTARTSYS;
49955 break;
49956 }
49957- pipe->waiting_writers++;
49958+ atomic_inc(&pipe->waiting_writers);
49959 pipe_wait(pipe);
49960- pipe->waiting_writers--;
49961+ atomic_dec(&pipe->waiting_writers);
49962 }
58c5fc13 49963
ae4e228f 49964 pipe_unlock(pipe);
5e856224 49965@@ -1821,14 +1821,14 @@ retry:
ae4e228f
MT
49966 pipe_double_lock(ipipe, opipe);
49967
49968 do {
49969- if (!opipe->readers) {
49970+ if (!atomic_read(&opipe->readers)) {
49971 send_sig(SIGPIPE, current, 0);
49972 if (!ret)
49973 ret = -EPIPE;
49974 break;
49975 }
49976
49977- if (!ipipe->nrbufs && !ipipe->writers)
49978+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49979 break;
49980
49981 /*
5e856224 49982@@ -1925,7 +1925,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49983 pipe_double_lock(ipipe, opipe);
49984
49985 do {
49986- if (!opipe->readers) {
49987+ if (!atomic_read(&opipe->readers)) {
49988 send_sig(SIGPIPE, current, 0);
49989 if (!ret)
49990 ret = -EPIPE;
5e856224 49991@@ -1970,7 +1970,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49992 * return EAGAIN if we have the potential of some data in the
49993 * future, otherwise just return 0
49994 */
49995- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49996+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49997 ret = -EAGAIN;
49998
49999 pipe_unlock(ipipe);
4c928ab7
MT
50000diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
50001index a475983..9c6a1f0 100644
50002--- a/fs/sysfs/bin.c
50003+++ b/fs/sysfs/bin.c
50004@@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count)
50005 }
50006
50007 static ssize_t
50008+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
50009+static ssize_t
50010 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
50011 {
50012 struct bin_buffer *bb = file->private_data;
50013diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
50014index 7fdf6a7..e6cd8ad 100644
50015--- a/fs/sysfs/dir.c
50016+++ b/fs/sysfs/dir.c
50017@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
50018 struct sysfs_dirent *sd;
50019 int rc;
50020
50021+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50022+ const char *parent_name = parent_sd->s_name;
50023+
50024+ mode = S_IFDIR | S_IRWXU;
50025+
50026+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
50027+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
50028+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
50029+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
50030+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
50031+#endif
50032+
50033 /* allocate */
50034 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
50035 if (!sd)
fe2de317 50036diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
5e856224 50037index 00012e3..8392349 100644
fe2de317
MT
50038--- a/fs/sysfs/file.c
50039+++ b/fs/sysfs/file.c
50040@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
8308f9c9
MT
50041
50042 struct sysfs_open_dirent {
50043 atomic_t refcnt;
50044- atomic_t event;
50045+ atomic_unchecked_t event;
50046 wait_queue_head_t poll;
50047 struct list_head buffers; /* goes through sysfs_buffer.list */
50048 };
fe2de317 50049@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
8308f9c9
MT
50050 if (!sysfs_get_active(attr_sd))
50051 return -ENODEV;
50052
50053- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50054+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50055 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50056
50057 sysfs_put_active(attr_sd);
fe2de317 50058@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
8308f9c9
MT
50059 return -ENOMEM;
50060
50061 atomic_set(&new_od->refcnt, 0);
50062- atomic_set(&new_od->event, 1);
50063+ atomic_set_unchecked(&new_od->event, 1);
50064 init_waitqueue_head(&new_od->poll);
50065 INIT_LIST_HEAD(&new_od->buffers);
50066 goto retry;
fe2de317 50067@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
50068
50069 sysfs_put_active(attr_sd);
50070
50071- if (buffer->event != atomic_read(&od->event))
50072+ if (buffer->event != atomic_read_unchecked(&od->event))
50073 goto trigger;
50074
50075 return DEFAULT_POLLMASK;
fe2de317 50076@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
8308f9c9
MT
50077
50078 od = sd->s_attr.open;
50079 if (od) {
50080- atomic_inc(&od->event);
50081+ atomic_inc_unchecked(&od->event);
50082 wake_up_interruptible(&od->poll);
50083 }
50084
fe2de317
MT
50085diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50086index a7ac78f..02158e1 100644
50087--- a/fs/sysfs/symlink.c
50088+++ b/fs/sysfs/symlink.c
50089@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58c5fc13
MT
50090
50091 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50092 {
50093- char *page = nd_get_link(nd);
50094+ const char *page = nd_get_link(nd);
50095 if (!IS_ERR(page))
50096 free_page((unsigned long)page);
50097 }
4c928ab7 50098diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
5e856224 50099index f922cba..062fb02 100644
4c928ab7
MT
50100--- a/fs/ubifs/debug.c
50101+++ b/fs/ubifs/debug.c
5e856224 50102@@ -2819,6 +2819,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
4c928ab7
MT
50103 * debugfs file. Returns %0 or %1 in case of success and a negative error code
50104 * in case of failure.
50105 */
50106+static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2);
50107 static int interpret_user_input(const char __user *u, size_t count)
50108 {
50109 size_t buf_size;
5e856224 50110@@ -2837,6 +2838,8 @@ static int interpret_user_input(const char __user *u, size_t count)
4c928ab7
MT
50111 }
50112
50113 static ssize_t dfs_file_write(struct file *file, const char __user *u,
50114+ size_t count, loff_t *ppos) __size_overflow(3);
50115+static ssize_t dfs_file_write(struct file *file, const char __user *u,
50116 size_t count, loff_t *ppos)
50117 {
50118 struct ubifs_info *c = file->private_data;
fe2de317 50119diff --git a/fs/udf/misc.c b/fs/udf/misc.c
4c928ab7 50120index c175b4d..8f36a16 100644
fe2de317
MT
50121--- a/fs/udf/misc.c
50122+++ b/fs/udf/misc.c
4c928ab7 50123@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
bc901d79
MT
50124
50125 u8 udf_tag_checksum(const struct tag *t)
50126 {
50127- u8 *data = (u8 *)t;
50128+ const u8 *data = (const u8 *)t;
50129 u8 checksum = 0;
50130 int i;
50131 for (i = 0; i < sizeof(struct tag); ++i)
fe2de317
MT
50132diff --git a/fs/utimes.c b/fs/utimes.c
50133index ba653f3..06ea4b1 100644
50134--- a/fs/utimes.c
50135+++ b/fs/utimes.c
58c5fc13
MT
50136@@ -1,6 +1,7 @@
50137 #include <linux/compiler.h>
50138 #include <linux/file.h>
50139 #include <linux/fs.h>
50140+#include <linux/security.h>
50141 #include <linux/linkage.h>
50142 #include <linux/mount.h>
50143 #include <linux/namei.h>
fe2de317 50144@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
58c5fc13
MT
50145 goto mnt_drop_write_and_out;
50146 }
50147 }
50148+
50149+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50150+ error = -EACCES;
50151+ goto mnt_drop_write_and_out;
50152+ }
50153+
50154 mutex_lock(&inode->i_mutex);
50155 error = notify_change(path->dentry, &newattrs);
50156 mutex_unlock(&inode->i_mutex);
fe2de317 50157diff --git a/fs/xattr.c b/fs/xattr.c
5e856224 50158index 82f4337..236473c 100644
fe2de317
MT
50159--- a/fs/xattr.c
50160+++ b/fs/xattr.c
4c928ab7 50161@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
bc901d79
MT
50162 * Extended attribute SET operations
50163 */
50164 static long
50165-setxattr(struct dentry *d, const char __user *name, const void __user *value,
50166+setxattr(struct path *path, const char __user *name, const void __user *value,
50167 size_t size, int flags)
50168 {
50169 int error;
4c928ab7 50170@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
bc901d79
MT
50171 return PTR_ERR(kvalue);
50172 }
50173
50174- error = vfs_setxattr(d, kname, kvalue, size, flags);
50175+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50176+ error = -EACCES;
50177+ goto out;
50178+ }
50179+
50180+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50181+out:
50182 kfree(kvalue);
50183 return error;
50184 }
4c928ab7 50185@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
bc901d79
MT
50186 return error;
50187 error = mnt_want_write(path.mnt);
50188 if (!error) {
50189- error = setxattr(path.dentry, name, value, size, flags);
50190+ error = setxattr(&path, name, value, size, flags);
50191 mnt_drop_write(path.mnt);
50192 }
50193 path_put(&path);
4c928ab7 50194@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
bc901d79
MT
50195 return error;
50196 error = mnt_want_write(path.mnt);
50197 if (!error) {
50198- error = setxattr(path.dentry, name, value, size, flags);
50199+ error = setxattr(&path, name, value, size, flags);
50200 mnt_drop_write(path.mnt);
50201 }
50202 path_put(&path);
4c928ab7 50203@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
bc901d79
MT
50204 const void __user *,value, size_t, size, int, flags)
50205 {
50206 struct file *f;
50207- struct dentry *dentry;
50208 int error = -EBADF;
50209
50210 f = fget(fd);
50211 if (!f)
50212 return error;
50213- dentry = f->f_path.dentry;
50214- audit_inode(NULL, dentry);
50215+ audit_inode(NULL, f->f_path.dentry);
50216 error = mnt_want_write_file(f);
50217 if (!error) {
50218- error = setxattr(dentry, name, value, size, flags);
50219+ error = setxattr(&f->f_path, name, value, size, flags);
5e856224 50220 mnt_drop_write_file(f);
bc901d79
MT
50221 }
50222 fput(f);
fe2de317
MT
50223diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50224index 8d5a506..7f62712 100644
50225--- a/fs/xattr_acl.c
50226+++ b/fs/xattr_acl.c
50227@@ -17,8 +17,8 @@
50228 struct posix_acl *
50229 posix_acl_from_xattr(const void *value, size_t size)
50230 {
50231- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50232- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50233+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50234+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50235 int count;
50236 struct posix_acl *acl;
50237 struct posix_acl_entry *acl_e;
50238diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
5e856224 50239index 188ef2f..adcf864 100644
fe2de317
MT
50240--- a/fs/xfs/xfs_bmap.c
50241+++ b/fs/xfs/xfs_bmap.c
4c928ab7 50242@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
58c5fc13
MT
50243 int nmap,
50244 int ret_nmap);
50245 #else
50246-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50247+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50248 #endif /* DEBUG */
50249
ae4e228f 50250 STATIC int
fe2de317
MT
50251diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50252index 79d05e8..e3e5861 100644
50253--- a/fs/xfs/xfs_dir2_sf.c
50254+++ b/fs/xfs/xfs_dir2_sf.c
6e9df6a3 50255@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
71d190be
MT
50256 }
50257
6e9df6a3 50258 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
71d190be
MT
50259- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50260+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50261+ char name[sfep->namelen];
50262+ memcpy(name, sfep->name, sfep->namelen);
50263+ if (filldir(dirent, name, sfep->namelen,
50264+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
50265+ *offset = off & 0x7fffffff;
50266+ return 0;
50267+ }
50268+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50269 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50270 *offset = off & 0x7fffffff;
50271 return 0;
fe2de317 50272diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
5e856224 50273index 76f3ca5..f57f712 100644
fe2de317
MT
50274--- a/fs/xfs/xfs_ioctl.c
50275+++ b/fs/xfs/xfs_ioctl.c
6e9df6a3
MT
50276@@ -128,7 +128,7 @@ xfs_find_handle(
50277 }
50278
50279 error = -EFAULT;
50280- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50281+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50282 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50283 goto out_put;
50284
fe2de317 50285diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
5e856224 50286index ab30253..4d86958 100644
fe2de317
MT
50287--- a/fs/xfs/xfs_iops.c
50288+++ b/fs/xfs/xfs_iops.c
4c928ab7 50289@@ -447,7 +447,7 @@ xfs_vn_put_link(
6e9df6a3
MT
50290 struct nameidata *nd,
50291 void *p)
50292 {
50293- char *s = nd_get_link(nd);
50294+ const char *s = nd_get_link(nd);
50295
50296 if (!IS_ERR(s))
50297 kfree(s);
fe2de317
MT
50298diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50299new file mode 100644
5e856224 50300index 0000000..2645296
fe2de317
MT
50301--- /dev/null
50302+++ b/grsecurity/Kconfig
5e856224 50303@@ -0,0 +1,1079 @@
fe2de317
MT
50304+#
50305+# grsecurity configuration
50306+#
883a9837 50307+
fe2de317 50308+menu "Grsecurity"
58c5fc13 50309+
fe2de317
MT
50310+config GRKERNSEC
50311+ bool "Grsecurity"
50312+ select CRYPTO
50313+ select CRYPTO_SHA256
50314+ help
50315+ If you say Y here, you will be able to configure many features
50316+ that will enhance the security of your system. It is highly
50317+ recommended that you say Y here and read through the help
50318+ for each option so that you fully understand the features and
50319+ can evaluate their usefulness for your machine.
58c5fc13 50320+
fe2de317
MT
50321+choice
50322+ prompt "Security Level"
50323+ depends on GRKERNSEC
50324+ default GRKERNSEC_CUSTOM
58c5fc13 50325+
fe2de317
MT
50326+config GRKERNSEC_LOW
50327+ bool "Low"
50328+ select GRKERNSEC_LINK
50329+ select GRKERNSEC_FIFO
50330+ select GRKERNSEC_RANDNET
50331+ select GRKERNSEC_DMESG
50332+ select GRKERNSEC_CHROOT
50333+ select GRKERNSEC_CHROOT_CHDIR
58c5fc13 50334+
fe2de317
MT
50335+ help
50336+ If you choose this option, several of the grsecurity options will
50337+ be enabled that will give you greater protection against a number
50338+ of attacks, while assuring that none of your software will have any
50339+ conflicts with the additional security measures. If you run a lot
50340+ of unusual software, or you are having problems with the higher
50341+ security levels, you should say Y here. With this option, the
50342+ following features are enabled:
58c5fc13 50343+
fe2de317
MT
50344+ - Linking restrictions
50345+ - FIFO restrictions
50346+ - Restricted dmesg
50347+ - Enforced chdir("/") on chroot
50348+ - Runtime module disabling
58c5fc13 50349+
fe2de317
MT
50350+config GRKERNSEC_MEDIUM
50351+ bool "Medium"
50352+ select PAX
50353+ select PAX_EI_PAX
50354+ select PAX_PT_PAX_FLAGS
50355+ select PAX_HAVE_ACL_FLAGS
50356+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50357+ select GRKERNSEC_CHROOT
50358+ select GRKERNSEC_CHROOT_SYSCTL
50359+ select GRKERNSEC_LINK
50360+ select GRKERNSEC_FIFO
50361+ select GRKERNSEC_DMESG
50362+ select GRKERNSEC_RANDNET
50363+ select GRKERNSEC_FORKFAIL
50364+ select GRKERNSEC_TIME
50365+ select GRKERNSEC_SIGNAL
50366+ select GRKERNSEC_CHROOT
50367+ select GRKERNSEC_CHROOT_UNIX
50368+ select GRKERNSEC_CHROOT_MOUNT
50369+ select GRKERNSEC_CHROOT_PIVOT
50370+ select GRKERNSEC_CHROOT_DOUBLE
50371+ select GRKERNSEC_CHROOT_CHDIR
50372+ select GRKERNSEC_CHROOT_MKNOD
50373+ select GRKERNSEC_PROC
50374+ select GRKERNSEC_PROC_USERGROUP
50375+ select PAX_RANDUSTACK
50376+ select PAX_ASLR
50377+ select PAX_RANDMMAP
50378+ select PAX_REFCOUNT if (X86 || SPARC64)
50379+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
58c5fc13 50380+
fe2de317
MT
50381+ help
50382+ If you say Y here, several features in addition to those included
50383+ in the low additional security level will be enabled. These
50384+ features provide even more security to your system, though in rare
50385+ cases they may be incompatible with very old or poorly written
50386+ software. If you enable this option, make sure that your auth
50387+ service (identd) is running as gid 1001. With this option,
50388+ the following features (in addition to those provided in the
50389+ low additional security level) will be enabled:
58c5fc13 50390+
fe2de317
MT
50391+ - Failed fork logging
50392+ - Time change logging
50393+ - Signal logging
50394+ - Deny mounts in chroot
50395+ - Deny double chrooting
50396+ - Deny sysctl writes in chroot
50397+ - Deny mknod in chroot
50398+ - Deny access to abstract AF_UNIX sockets out of chroot
50399+ - Deny pivot_root in chroot
50400+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
50401+ - /proc restrictions with special GID set to 10 (usually wheel)
50402+ - Address Space Layout Randomization (ASLR)
50403+ - Prevent exploitation of most refcount overflows
50404+ - Bounds checking of copying between the kernel and userland
58c5fc13 50405+
fe2de317
MT
50406+config GRKERNSEC_HIGH
50407+ bool "High"
50408+ select GRKERNSEC_LINK
50409+ select GRKERNSEC_FIFO
50410+ select GRKERNSEC_DMESG
50411+ select GRKERNSEC_FORKFAIL
50412+ select GRKERNSEC_TIME
50413+ select GRKERNSEC_SIGNAL
50414+ select GRKERNSEC_CHROOT
50415+ select GRKERNSEC_CHROOT_SHMAT
50416+ select GRKERNSEC_CHROOT_UNIX
50417+ select GRKERNSEC_CHROOT_MOUNT
50418+ select GRKERNSEC_CHROOT_FCHDIR
50419+ select GRKERNSEC_CHROOT_PIVOT
50420+ select GRKERNSEC_CHROOT_DOUBLE
50421+ select GRKERNSEC_CHROOT_CHDIR
50422+ select GRKERNSEC_CHROOT_MKNOD
50423+ select GRKERNSEC_CHROOT_CAPS
50424+ select GRKERNSEC_CHROOT_SYSCTL
50425+ select GRKERNSEC_CHROOT_FINDTASK
50426+ select GRKERNSEC_SYSFS_RESTRICT
50427+ select GRKERNSEC_PROC
50428+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50429+ select GRKERNSEC_HIDESYM
50430+ select GRKERNSEC_BRUTE
50431+ select GRKERNSEC_PROC_USERGROUP
50432+ select GRKERNSEC_KMEM
50433+ select GRKERNSEC_RESLOG
50434+ select GRKERNSEC_RANDNET
50435+ select GRKERNSEC_PROC_ADD
50436+ select GRKERNSEC_CHROOT_CHMOD
50437+ select GRKERNSEC_CHROOT_NICE
5e856224 50438+ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
fe2de317
MT
50439+ select GRKERNSEC_AUDIT_MOUNT
50440+ select GRKERNSEC_MODHARDEN if (MODULES)
50441+ select GRKERNSEC_HARDEN_PTRACE
4c928ab7 50442+ select GRKERNSEC_PTRACE_READEXEC
fe2de317
MT
50443+ select GRKERNSEC_VM86 if (X86_32)
50444+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50445+ select PAX
50446+ select PAX_RANDUSTACK
50447+ select PAX_ASLR
50448+ select PAX_RANDMMAP
50449+ select PAX_NOEXEC
50450+ select PAX_MPROTECT
50451+ select PAX_EI_PAX
50452+ select PAX_PT_PAX_FLAGS
50453+ select PAX_HAVE_ACL_FLAGS
50454+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50455+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50456+ select PAX_RANDKSTACK if (X86_TSC && X86)
50457+ select PAX_SEGMEXEC if (X86_32)
50458+ select PAX_PAGEEXEC
50459+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50460+ select PAX_EMUTRAMP if (PARISC)
50461+ select PAX_EMUSIGRT if (PARISC)
50462+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50463+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50464+ select PAX_REFCOUNT if (X86 || SPARC64)
50465+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50466+ help
50467+ If you say Y here, many of the features of grsecurity will be
50468+ enabled, which will protect you against many kinds of attacks
50469+ against your system. The heightened security comes at a cost
50470+ of an increased chance of incompatibilities with rare software
50471+ on your machine. Since this security level enables PaX, you should
50472+ view <http://pax.grsecurity.net> and read about the PaX
50473+ project. While you are there, download chpax and run it on
50474+ binaries that cause problems with PaX. Also remember that
50475+ since the /proc restrictions are enabled, you must run your
50476+ identd as gid 1001. This security level enables the following
50477+ features in addition to those listed in the low and medium
50478+ security levels:
58c5fc13 50479+
fe2de317
MT
50480+ - Additional /proc restrictions
50481+ - Chmod restrictions in chroot
50482+ - No signals, ptrace, or viewing of processes outside of chroot
50483+ - Capability restrictions in chroot
50484+ - Deny fchdir out of chroot
50485+ - Priority restrictions in chroot
50486+ - Segmentation-based implementation of PaX
50487+ - Mprotect restrictions
50488+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50489+ - Kernel stack randomization
50490+ - Mount/unmount/remount logging
50491+ - Kernel symbol hiding
50492+ - Hardening of module auto-loading
50493+ - Ptrace restrictions
50494+ - Restricted vm86 mode
50495+ - Restricted sysfs/debugfs
50496+ - Active kernel exploit response
58c5fc13 50497+
fe2de317
MT
50498+config GRKERNSEC_CUSTOM
50499+ bool "Custom"
50500+ help
50501+ If you say Y here, you will be able to configure every grsecurity
50502+ option, which allows you to enable many more features that aren't
50503+ covered in the basic security levels. These additional features
50504+ include TPE, socket restrictions, and the sysctl system for
50505+ grsecurity. It is advised that you read through the help for
50506+ each option to determine its usefulness in your situation.
58c5fc13 50507+
fe2de317 50508+endchoice
58c5fc13 50509+
4c928ab7 50510+menu "Memory Protections"
fe2de317 50511+depends on GRKERNSEC
58c5fc13 50512+
fe2de317
MT
50513+config GRKERNSEC_KMEM
50514+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50515+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50516+ help
50517+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50518+ be written to or read from to modify or leak the contents of the running
50519+ kernel. /dev/port will also not be allowed to be opened. If you have module
50520+ support disabled, enabling this will close up four ways that are
50521+ currently used to insert malicious code into the running kernel.
50522+ Even with all these features enabled, we still highly recommend that
50523+ you use the RBAC system, as it is still possible for an attacker to
50524+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50525+ If you are not using XFree86, you may be able to stop this additional
50526+ case by enabling the 'Disable privileged I/O' option. Though nothing
50527+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50528+ but only to video memory, which is the only writing we allow in this
50529+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
50530+ mapping will not be allowed to be mprotected with PROT_WRITE later.
50531+ It is highly recommended that you say Y here if you meet all the
50532+ conditions above.
58c5fc13 50533+
fe2de317
MT
50534+config GRKERNSEC_VM86
50535+ bool "Restrict VM86 mode"
50536+ depends on X86_32
58c5fc13 50537+
fe2de317
MT
50538+ help
50539+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50540+ make use of a special execution mode on 32bit x86 processors called
50541+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50542+ video cards and will still work with this option enabled. The purpose
50543+ of the option is to prevent exploitation of emulation errors in
50544+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50545+ Nearly all users should be able to enable this option.
58c5fc13 50546+
fe2de317
MT
50547+config GRKERNSEC_IO
50548+ bool "Disable privileged I/O"
50549+ depends on X86
50550+ select RTC_CLASS
50551+ select RTC_INTF_DEV
50552+ select RTC_DRV_CMOS
58c5fc13 50553+
fe2de317
MT
50554+ help
50555+ If you say Y here, all ioperm and iopl calls will return an error.
50556+ Ioperm and iopl can be used to modify the running kernel.
50557+ Unfortunately, some programs need this access to operate properly,
50558+ the most notable of which are XFree86 and hwclock. hwclock can be
50559+ remedied by having RTC support in the kernel, so real-time
50560+ clock support is enabled if this option is enabled, to ensure
50561+ that hwclock operates correctly. XFree86 still will not
50562+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50563+ IF YOU USE XFree86. If you use XFree86 and you still want to
50564+ protect your kernel against modification, use the RBAC system.
58c5fc13 50565+
fe2de317 50566+config GRKERNSEC_PROC_MEMMAP
4c928ab7 50567+ bool "Harden ASLR against information leaks and entropy reduction"
fe2de317
MT
50568+ default y if (PAX_NOEXEC || PAX_ASLR)
50569+ depends on PAX_NOEXEC || PAX_ASLR
50570+ help
50571+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50572+ give no information about the addresses of its mappings if
50573+ PaX features that rely on random addresses are enabled on the task.
4c928ab7
MT
50574+ In addition to sanitizing this information and disabling other
50575+ dangerous sources of information, this option causes reads of sensitive
50576+ /proc/<pid> entries where the file descriptor was opened in a different
50577+ task than the one performing the read to be rejected. Such attempts are logged.
50578+ This option also limits argv/env strings for suid/sgid binaries
50579+ to 512KB to prevent a complete exhaustion of the stack entropy provided
50580+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50581+ binaries to prevent alternative mmap layouts from being abused.
50582+
50583+ If you use PaX it is essential that you say Y here as it closes up
50584+ several holes that make full ASLR useless locally.
58c5fc13 50585+
fe2de317
MT
50586+config GRKERNSEC_BRUTE
50587+ bool "Deter exploit bruteforcing"
50588+ help
50589+ If you say Y here, attempts to bruteforce exploits against forking
50590+ daemons such as apache or sshd, as well as against suid/sgid binaries
50591+ will be deterred. When a child of a forking daemon is killed by PaX
50592+ or crashes due to an illegal instruction or other suspicious signal,
50593+ the parent process will be delayed 30 seconds upon every subsequent
50594+ fork until the administrator is able to assess the situation and
50595+ restart the daemon.
50596+ In the suid/sgid case, the attempt is logged, the user has all their
50597+ processes terminated, and they are prevented from executing any further
50598+ processes for 15 minutes.
50599+ It is recommended that you also enable signal logging in the auditing
50600+ section so that logs are generated when a process triggers a suspicious
50601+ signal.
50602+ If the sysctl option is enabled, a sysctl option with name
50603+ "deter_bruteforce" is created.
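The deterrence described above is, at its core, rate limiting of respawns after a crash. A user-space analogue of the forking-daemon case is sketched below; the worker() stub and the fixed 30-second delay are assumptions made only for the illustration, not grsecurity's kernel code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Stand-in for the real daemon work; crashes on its first run to
     * demonstrate the delay, then exits cleanly. */
    static void worker(int attempt)
    {
        if (attempt == 0)
            abort();            /* simulated crash (SIGABRT) */
        _exit(0);
    }

    int main(void)
    {
        for (int attempt = 0; attempt < 2; attempt++) {
            pid_t pid = fork();
            int status;

            if (pid == 0)
                worker(attempt);
            if (pid < 0 || waitpid(pid, &status, 0) < 0)
                return 1;

            if (WIFSIGNALED(status)) {
                /* A crashed child is treated as a possible exploit
                 * attempt: delay the next fork, much as the option
                 * above delays the forking parent. */
                fprintf(stderr, "child killed by signal %d, delaying respawn\n",
                        WTERMSIG(status));
                sleep(30);
            }
        }
        return 0;
    }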
58c5fc13 50604+
58c5fc13 50605+
fe2de317
MT
50606+config GRKERNSEC_MODHARDEN
50607+ bool "Harden module auto-loading"
50608+ depends on MODULES
50609+ help
50610+ If you say Y here, module auto-loading in response to use of some
50611+ feature implemented by an unloaded module will be restricted to
50612+ root users. Enabling this option helps defend against attacks
50613+ by unprivileged users who abuse the auto-loading behavior to
50614+ cause a vulnerable module to load that is then exploited.
58c5fc13 50615+
fe2de317
MT
50616+ If this option prevents a legitimate use of auto-loading for a
50617+ non-root user, the administrator can execute modprobe manually
50618+ with the exact name of the module mentioned in the alert log.
50619+ Alternatively, the administrator can add the module to the list
50620+ of modules loaded at boot by modifying init scripts.
58c5fc13 50621+
fe2de317
MT
50622+ Modification of init scripts will most likely be needed on
50623+ Ubuntu servers with encrypted home directory support enabled,
50624+ as the first non-root user logging in will cause the ecb(aes),
50625+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
58c5fc13 50626+
fe2de317
MT
50627+config GRKERNSEC_HIDESYM
50628+ bool "Hide kernel symbols"
50629+ help
50630+ If you say Y here, getting information on loaded modules, and
50631+ displaying all kernel symbols through a syscall will be restricted
50632+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50633+ /proc/kallsyms will be restricted to the root user. The RBAC
50634+ system can hide that entry even from root.
58c5fc13 50635+
fe2de317
MT
50636+ This option also prevents leaking of kernel addresses through
50637+ several /proc entries.
58c5fc13 50638+
fe2de317
MT
50639+ Note that this option is only effective provided the following
50640+ conditions are met:
50641+ 1) The kernel using grsecurity is not precompiled by some distribution
50642+ 2) You have also enabled GRKERNSEC_DMESG
50643+ 3) You are using the RBAC system and hiding other files such as your
50644+ kernel image and System.map. Alternatively, enabling this option
50645+ causes the permissions on /boot, /lib/modules, and the kernel
50646+ source directory to change at compile time to prevent
50647+ reading by non-root users.
50648+ If the above conditions are met, this option will aid in providing a
50649+ useful protection against local kernel exploitation of overflows
50650+ and arbitrary read/write vulnerabilities.
58c5fc13 50651+
fe2de317
MT
50652+config GRKERNSEC_KERN_LOCKOUT
50653+ bool "Active kernel exploit response"
50654+ depends on X86 || ARM || PPC || SPARC
50655+ help
50656+ If you say Y here, when a PaX alert is triggered due to suspicious
50657+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50658+ or an OOPs occurs due to bad memory accesses, instead of just
50659+ terminating the offending process (and potentially allowing
50660+ a subsequent exploit from the same user), we will take one of two
50661+ actions:
50662+ If the user was root, we will panic the system
50663+ If the user was non-root, we will log the attempt, terminate
50664+ all processes owned by the user, then prevent them from creating
50665+ any new processes until the system is restarted
50666+ This deters repeated kernel exploitation/bruteforcing attempts
50667+ and is useful for later forensics.
ae4e228f 50668+
fe2de317
MT
50669+endmenu
50670+menu "Role Based Access Control Options"
50671+depends on GRKERNSEC
58c5fc13 50672+
fe2de317
MT
50673+config GRKERNSEC_RBAC_DEBUG
50674+ bool
58c5fc13 50675+
fe2de317
MT
50676+config GRKERNSEC_NO_RBAC
50677+ bool "Disable RBAC system"
50678+ help
50679+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50680+ preventing the RBAC system from being enabled. You should only say Y
50681+ here if you have no intention of using the RBAC system, so as to prevent
50682+ an attacker with root access from misusing the RBAC system to hide files
50683+ and processes when loadable module support and /dev/[k]mem have been
50684+ locked down.
58c5fc13 50685+
fe2de317
MT
50686+config GRKERNSEC_ACL_HIDEKERN
50687+ bool "Hide kernel processes"
50688+ help
50689+ If you say Y here, all kernel threads will be hidden to all
50690+ processes but those whose subject has the "view hidden processes"
50691+ flag.
58c5fc13 50692+
fe2de317
MT
50693+config GRKERNSEC_ACL_MAXTRIES
50694+ int "Maximum tries before password lockout"
50695+ default 3
50696+ help
50697+ This option enforces the maximum number of times a user can attempt
50698+ to authorize themselves with the grsecurity RBAC system before being
50699+ denied the ability to attempt authorization again for a specified time.
50700+ The lower the number, the harder it will be to brute-force a password.
58c5fc13 50701+
fe2de317
MT
50702+config GRKERNSEC_ACL_TIMEOUT
50703+ int "Time to wait after max password tries, in seconds"
50704+ default 30
50705+ help
50706+ This option specifies the time the user must wait after attempting to
50707+ authorize to the RBAC system with the maximum number of invalid
50708+ passwords. The higher the number, the harder it will be to brute-force
50709+ a password.
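Together these two options describe a familiar attempt counter with a lockout window: after GRKERNSEC_ACL_MAXTRIES failures, authentication is refused for GRKERNSEC_ACL_TIMEOUT seconds. A minimal user-space sketch of that logic follows; the check_password() stub and the constants are invented, and the real enforcement lives in the kernel's RBAC code:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define MAXTRIES 3      /* plays the role of GRKERNSEC_ACL_MAXTRIES */
    #define TIMEOUT  30     /* plays the role of GRKERNSEC_ACL_TIMEOUT */

    static unsigned int failures;
    static time_t locked_until;

    static bool check_password(const char *pw)
    {
        return strcmp(pw, "secret") == 0;   /* stand-in for the real check */
    }

    static bool try_auth(const char *pw)
    {
        time_t now = time(NULL);

        if (now < locked_until) {
            puts("locked out, try again later");
            return false;
        }
        if (check_password(pw)) {
            failures = 0;
            return true;
        }
        if (++failures >= MAXTRIES) {
            failures = 0;
            locked_until = now + TIMEOUT;   /* start the lockout window */
        }
        return false;
    }

    int main(void)
    {
        const char *attempts[] = { "a", "b", "c", "secret" };

        for (unsigned int i = 0; i < 4; i++)
            printf("attempt %u: %s\n", i + 1,
                   try_auth(attempts[i]) ? "ok" : "denied");
        return 0;
    }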
58c5fc13 50710+
fe2de317
MT
50711+endmenu
50712+menu "Filesystem Protections"
50713+depends on GRKERNSEC
58c5fc13 50714+
fe2de317
MT
50715+config GRKERNSEC_PROC
50716+ bool "Proc restrictions"
50717+ help
50718+ If you say Y here, the permissions of the /proc filesystem
50719+ will be altered to enhance system security and privacy. You MUST
50720+ choose either a user only restriction or a user and group restriction.
50721+ Depending upon the option you choose, you can either restrict users to
50722+ see only the processes they themselves run, or choose a group that can
50723+ view all processes and files normally restricted to root if you choose
4c928ab7
MT
50724+ the "restrict to user only" option. NOTE: If you're running identd or
50725+ ntpd as a non-root user, you will have to run it as the group you
50726+ specify here.
58c5fc13 50727+
fe2de317
MT
50728+config GRKERNSEC_PROC_USER
50729+ bool "Restrict /proc to user only"
50730+ depends on GRKERNSEC_PROC
50731+ help
50732+ If you say Y here, non-root users will only be able to view their own
50733+ processes, and will be restricted from viewing network-related
50734+ information and kernel symbol and module information.
58c5fc13 50735+
fe2de317
MT
50736+config GRKERNSEC_PROC_USERGROUP
50737+ bool "Allow special group"
50738+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50739+ help
50740+ If you say Y here, you will be able to select a group that will be
50741+ able to view all processes and network-related information. If you've
50742+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50743+ remain hidden. This option is useful if you want to run identd as
50744+ a non-root user.
58c5fc13 50745+
fe2de317
MT
50746+config GRKERNSEC_PROC_GID
50747+ int "GID for special group"
50748+ depends on GRKERNSEC_PROC_USERGROUP
50749+ default 1001
df50ba0c 50750+
fe2de317
MT
50751+config GRKERNSEC_PROC_ADD
50752+ bool "Additional restrictions"
50753+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50754+ help
50755+ If you say Y here, additional restrictions will be placed on
50756+ /proc that keep normal users from viewing device information and
50757+ slabinfo information that could be useful for exploits.
58c5fc13 50758+
fe2de317
MT
50759+config GRKERNSEC_LINK
50760+ bool "Linking restrictions"
50761+ help
50762+ If you say Y here, /tmp race exploits will be prevented, since users
50763+ will no longer be able to follow symlinks owned by other users in
50764+ world-writable +t directories (e.g. /tmp), unless the owner of the
50765+ symlink is the owner of the directory. Users will also not be
50766+ able to hardlink to files they do not own. If the sysctl option is
50767+ enabled, a sysctl option with name "linking_restrictions" is created.
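The rule can be read as a predicate applied during path lookup: in a world-writable sticky directory, follow a symlink only if its owner is the directory owner or the process itself. A rough user-space approximation using lstat(2) is shown below; the function name is made up, and the real check is performed by the kernel during lookup:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Approximate the linking restriction for a symlink `linkpath` that
     * lives directly inside the directory `dirpath`. */
    static bool symlink_follow_allowed(const char *dirpath, const char *linkpath)
    {
        struct stat dir_st, link_st;

        if (stat(dirpath, &dir_st) < 0 || lstat(linkpath, &link_st) < 0)
            return false;
        if (!S_ISLNK(link_st.st_mode))
            return true;                    /* not a symlink: nothing to do */

        /* Only restrict world-writable, sticky (+t) directories like /tmp. */
        if ((dir_st.st_mode & (S_IWOTH | S_ISVTX)) != (S_IWOTH | S_ISVTX))
            return true;

        return link_st.st_uid == dir_st.st_uid || link_st.st_uid == getuid();
    }

    int main(int argc, char **argv)
    {
        if (argc != 3) {
            fprintf(stderr, "usage: %s <directory> <symlink>\n", argv[0]);
            return 2;
        }
        printf("%s\n", symlink_follow_allowed(argv[1], argv[2])
                           ? "allowed" : "denied");
        return 0;
    }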
15a11c5b 50768+
fe2de317
MT
50769+config GRKERNSEC_FIFO
50770+ bool "FIFO restrictions"
50771+ help
50772+ If you say Y here, users will not be able to write to FIFOs they don't
50773+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50774+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
50775+ option is enabled, a sysctl option with name "fifo_restrictions" is
50776+ created.
58c5fc13 50777+
fe2de317
MT
50778+config GRKERNSEC_SYSFS_RESTRICT
50779+ bool "Sysfs/debugfs restriction"
50780+ depends on SYSFS
50781+ help
50782+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
4c928ab7
MT
50783+ any filesystem normally mounted under it (e.g. debugfs) will be
50784+ mostly accessible only by root. These filesystems generally provide access
fe2de317
MT
50785+ to hardware and debug information that isn't appropriate for unprivileged
50786+ users of the system. Sysfs and debugfs have also become a large source
50787+ of new vulnerabilities, ranging from infoleaks to local compromise.
50788+ There has been very little oversight with an eye toward security involved
50789+ in adding new exporters of information to these filesystems, so their
50790+ use is discouraged.
4c928ab7
MT
50791+ For reasons of compatibility, a few directories have been whitelisted
50792+ for access by non-root users:
50793+ /sys/fs/selinux
50794+ /sys/fs/fuse
50795+ /sys/devices/system/cpu
df50ba0c 50796+
fe2de317
MT
50797+config GRKERNSEC_ROFS
50798+ bool "Runtime read-only mount protection"
50799+ help
50800+ If you say Y here, a sysctl option with name "romount_protect" will
50801+ be created. By setting this option to 1 at runtime, filesystems
50802+ will be protected in the following ways:
50803+ * No new writable mounts will be allowed
50804+ * Existing read-only mounts won't be able to be remounted read/write
50805+ * Write operations will be denied on all block devices
50806+ This option acts independently of grsec_lock: once it is set to 1,
50807+ it cannot be turned off. Therefore, please be mindful of the resulting
50808+ behavior if this option is enabled in an init script on a read-only
50809+ filesystem. This feature is mainly intended for secure embedded systems.
58c5fc13 50810+
fe2de317
MT
50811+config GRKERNSEC_CHROOT
50812+ bool "Chroot jail restrictions"
50813+ help
50814+ If you say Y here, you will be able to choose several options that will
50815+ make breaking out of a chrooted jail much more difficult. If you
50816+ encounter no software incompatibilities with the following options, it
50817+ is recommended that you enable each one.
58c5fc13 50818+
fe2de317
MT
50819+config GRKERNSEC_CHROOT_MOUNT
50820+ bool "Deny mounts"
50821+ depends on GRKERNSEC_CHROOT
50822+ help
50823+ If you say Y here, processes inside a chroot will not be able to
50824+ mount or remount filesystems. If the sysctl option is enabled, a
50825+ sysctl option with name "chroot_deny_mount" is created.
58c5fc13 50826+
fe2de317
MT
50827+config GRKERNSEC_CHROOT_DOUBLE
50828+ bool "Deny double-chroots"
50829+ depends on GRKERNSEC_CHROOT
50830+ help
50831+ If you say Y here, processes inside a chroot will not be able to chroot
50832+ again outside the chroot. This is a widely used method of breaking
50833+ out of a chroot jail and should not be allowed. If the sysctl
50834+ option is enabled, a sysctl option with name
50835+ "chroot_deny_chroot" is created.
16454cff 50836+
fe2de317
MT
50837+config GRKERNSEC_CHROOT_PIVOT
50838+ bool "Deny pivot_root in chroot"
50839+ depends on GRKERNSEC_CHROOT
50840+ help
50841+ If you say Y here, processes inside a chroot will not be able to use
50842+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50843+ works similar to chroot in that it changes the root filesystem. This
50844+ function could be misused in a chrooted process to attempt to break out
50845+ of the chroot, and therefore should not be allowed. If the sysctl
50846+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50847+ created.
16454cff 50848+
fe2de317
MT
50849+config GRKERNSEC_CHROOT_CHDIR
50850+ bool "Enforce chdir(\"/\") on all chroots"
50851+ depends on GRKERNSEC_CHROOT
50852+ help
50853+ If you say Y here, the current working directory of all newly-chrooted
50854+ applications will be set to the root directory of the chroot.
50855+ The man page on chroot(2) states:
50856+ Note that this call does not change the current working
50857+ directory, so that `.' can be outside the tree rooted at
50858+ `/'. In particular, the super-user can escape from a
50859+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
16454cff 50860+
fe2de317
MT
50861+ It is recommended that you say Y here, since it's not known to break
50862+ any software. If the sysctl option is enabled, a sysctl option with
50863+ name "chroot_enforce_chdir" is created.
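The escape quoted from the man page works because the process keeps a working directory outside the new root. This option enforces kernel-side what careful user-space code already does by hand, namely calling chdir("/") immediately after a successful chroot(2), as in this minimal sketch (the /srv/jail path is only an example and the program must run as root):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        const char *jail = "/srv/jail";     /* hypothetical jail directory */

        if (chroot(jail) < 0) {
            perror("chroot");
            return 1;
        }
        /* Without this, "." may still point outside the jail, which is the
         * escape this option closes for every chroot in the kernel. */
        if (chdir("/") < 0) {
            perror("chdir");
            return 1;
        }
        execl("/bin/sh", "sh", (char *)NULL);
        perror("execl");
        return 127;
    }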
58c5fc13 50864+
fe2de317
MT
50865+config GRKERNSEC_CHROOT_CHMOD
50866+ bool "Deny (f)chmod +s"
50867+ depends on GRKERNSEC_CHROOT
50868+ help
50869+ If you say Y here, processes inside a chroot will not be able to chmod
50870+ or fchmod files to make them have suid or sgid bits. This protects
50871+ against another published method of breaking a chroot. If the sysctl
50872+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50873+ created.
58c5fc13 50874+
fe2de317
MT
50875+config GRKERNSEC_CHROOT_FCHDIR
50876+ bool "Deny fchdir out of chroot"
50877+ depends on GRKERNSEC_CHROOT
50878+ help
50879+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50880+ to a file descriptor of the chrooting process that points to a directory
50881+ outside the filesystem will be stopped. If the sysctl option
50882+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
6892158b 50883+
fe2de317
MT
50884+config GRKERNSEC_CHROOT_MKNOD
50885+ bool "Deny mknod"
50886+ depends on GRKERNSEC_CHROOT
50887+ help
50888+ If you say Y here, processes inside a chroot will not be allowed to
50889+ mknod. The problem with using mknod inside a chroot is that it
50890+ would allow an attacker to create a device entry that is the same
50891+ as one on the physical root of your system, which could range from
50892+ as one on the physical root of your system, which could be anything
50893+ from the console device to a device for your hard drive (which
50894+ that you say Y here, unless you run into software incompatibilities.
50895+ If the sysctl option is enabled, a sysctl option with name
50896+ "chroot_deny_mknod" is created.
58c5fc13 50897+
fe2de317
MT
50898+config GRKERNSEC_CHROOT_SHMAT
50899+ bool "Deny shmat() out of chroot"
50900+ depends on GRKERNSEC_CHROOT
50901+ help
50902+ If you say Y here, processes inside a chroot will not be able to attach
50903+ to shared memory segments that were created outside of the chroot jail.
50904+ It is recommended that you say Y here. If the sysctl option is enabled,
50905+ a sysctl option with name "chroot_deny_shmat" is created.
58c5fc13 50906+
fe2de317
MT
50907+config GRKERNSEC_CHROOT_UNIX
50908+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50909+ depends on GRKERNSEC_CHROOT
50910+ help
50911+ If you say Y here, processes inside a chroot will not be able to
50912+ connect to abstract (meaning not belonging to a filesystem) Unix
50913+ domain sockets that were bound outside of a chroot. It is recommended
50914+ that you say Y here. If the sysctl option is enabled, a sysctl option
50915+ with name "chroot_deny_unix" is created.
58c5fc13 50916+
fe2de317
MT
50917+config GRKERNSEC_CHROOT_FINDTASK
50918+ bool "Protect outside processes"
50919+ depends on GRKERNSEC_CHROOT
50920+ help
50921+ If you say Y here, processes inside a chroot will not be able to
50922+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50923+ getsid, or view any process outside of the chroot. If the sysctl
50924+ option is enabled, a sysctl option with name "chroot_findtask" is
50925+ created.
58c5fc13 50926+
fe2de317
MT
50927+config GRKERNSEC_CHROOT_NICE
50928+ bool "Restrict priority changes"
50929+ depends on GRKERNSEC_CHROOT
50930+ help
50931+ If you say Y here, processes inside a chroot will not be able to raise
50932+ the priority of processes in the chroot, or alter the priority of
50933+ processes outside the chroot. This provides more security than simply
50934+ removing CAP_SYS_NICE from the process' capability set. If the
50935+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50936+ is created.
bc901d79 50937+
fe2de317
MT
50938+config GRKERNSEC_CHROOT_SYSCTL
50939+ bool "Deny sysctl writes"
50940+ depends on GRKERNSEC_CHROOT
50941+ help
50942+ If you say Y here, an attacker in a chroot will not be able to
50943+ write to sysctl entries, either by sysctl(2) or through a /proc
50944+ interface. It is strongly recommended that you say Y here. If the
50945+ sysctl option is enabled, a sysctl option with name
50946+ "chroot_deny_sysctl" is created.
bc901d79 50947+
fe2de317
MT
50948+config GRKERNSEC_CHROOT_CAPS
50949+ bool "Capability restrictions"
50950+ depends on GRKERNSEC_CHROOT
50951+ help
50952+ If you say Y here, the capabilities on all processes within a
50953+ chroot jail will be lowered to stop module insertion, raw i/o,
50954+ system and net admin tasks, rebooting the system, modifying immutable
50955+ files, modifying IPC owned by another, and changing the system time.
50956+ This is left an option because it can break some apps. Disable this
50957+ if your chrooted apps are having problems performing those kinds of
50958+ tasks. If the sysctl option is enabled, a sysctl option with
50959+ name "chroot_caps" is created.
bc901d79 50960+
fe2de317
MT
50961+endmenu
50962+menu "Kernel Auditing"
50963+depends on GRKERNSEC
bc901d79 50964+
fe2de317
MT
50965+config GRKERNSEC_AUDIT_GROUP
50966+ bool "Single group for auditing"
50967+ help
50968+ If you say Y here, the exec, chdir, and (un)mount logging features
50969+ will only operate on a group you specify. This option is recommended
50970+ if you only want to watch certain users instead of having a large
50971+ amount of logs from the entire system. If the sysctl option is enabled,
50972+ a sysctl option with name "audit_group" is created.
bc901d79 50973+
fe2de317
MT
50974+config GRKERNSEC_AUDIT_GID
50975+ int "GID for auditing"
50976+ depends on GRKERNSEC_AUDIT_GROUP
50977+ default 1007
bc901d79 50978+
fe2de317
MT
50979+config GRKERNSEC_EXECLOG
50980+ bool "Exec logging"
50981+ help
50982+ If you say Y here, all execve() calls will be logged (since the
50983+ other exec*() calls are frontends to execve(), all execution
50984+ will be logged). Useful for shell-servers that like to keep track
50985+ of their users. If the sysctl option is enabled, a sysctl option with
50986+ name "exec_logging" is created.
50987+ WARNING: This option when enabled will produce a LOT of logs, especially
50988+ on an active system.
bc901d79 50989+
fe2de317
MT
50990+config GRKERNSEC_RESLOG
50991+ bool "Resource logging"
50992+ help
50993+ If you say Y here, all attempts to overstep resource limits will
50994+ be logged with the resource name, the requested size, and the current
50995+ limit. It is highly recommended that you say Y here. If the sysctl
50996+ option is enabled, a sysctl option with name "resource_logging" is
50997+ created. If the RBAC system is enabled, the sysctl value is ignored.
bc901d79 50998+
fe2de317
MT
50999+config GRKERNSEC_CHROOT_EXECLOG
51000+ bool "Log execs within chroot"
51001+ help
51002+ If you say Y here, all executions inside a chroot jail will be logged
51003+ to syslog. This can cause a large amount of logs if certain
51004+ applications (eg. djb's daemontools) are installed on the system, and
51005+ is therefore left as an option. If the sysctl option is enabled, a
51006+ sysctl option with name "chroot_execlog" is created.
bc901d79 51007+
fe2de317
MT
51008+config GRKERNSEC_AUDIT_PTRACE
51009+ bool "Ptrace logging"
51010+ help
51011+ If you say Y here, all attempts to attach to a process via ptrace
51012+ will be logged. If the sysctl option is enabled, a sysctl option
51013+ with name "audit_ptrace" is created.
bc901d79 51014+
fe2de317
MT
51015+config GRKERNSEC_AUDIT_CHDIR
51016+ bool "Chdir logging"
51017+ help
51018+ If you say Y here, all chdir() calls will be logged. If the sysctl
51019+ option is enabled, a sysctl option with name "audit_chdir" is created.
bc901d79 51020+
fe2de317
MT
51021+config GRKERNSEC_AUDIT_MOUNT
51022+ bool "(Un)Mount logging"
51023+ help
51024+ If you say Y here, all mounts and unmounts will be logged. If the
51025+ sysctl option is enabled, a sysctl option with name "audit_mount" is
51026+ created.
bc901d79 51027+
fe2de317
MT
51028+config GRKERNSEC_SIGNAL
51029+ bool "Signal logging"
51030+ help
51031+ If you say Y here, certain important signals will be logged, such as
51032+ SIGSEGV, which will as a result inform you when an error in a program
51033+ occurred, which in some cases could mean a possible exploit attempt.
51034+ If the sysctl option is enabled, a sysctl option with name
51035+ "signal_logging" is created.
58c5fc13 51036+
fe2de317
MT
51037+config GRKERNSEC_FORKFAIL
51038+ bool "Fork failure logging"
51039+ help
51040+ If you say Y here, all failed fork() attempts will be logged.
51041+ This could suggest a fork bomb, or someone attempting to overstep
51042+ their process limit. If the sysctl option is enabled, a sysctl option
51043+ with name "forkfail_logging" is created.
58c5fc13 51044+
fe2de317
MT
51045+config GRKERNSEC_TIME
51046+ bool "Time change logging"
51047+ help
51048+ If you say Y here, any changes of the system clock will be logged.
51049+ If the sysctl option is enabled, a sysctl option with name
51050+ "timechange_logging" is created.
58c5fc13 51051+
fe2de317
MT
51052+config GRKERNSEC_PROC_IPADDR
51053+ bool "/proc/<pid>/ipaddr support"
51054+ help
51055+ If you say Y here, a new entry will be added to each /proc/<pid>
51056+ directory that contains the IP address of the person using the task.
51057+ The IP is carried across local TCP and AF_UNIX stream sockets.
51058+ This information can be useful for IDS/IPSes to perform remote response
51059+ to a local attack. The entry is readable by only the owner of the
51060+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
51061+ the RBAC system), and thus does not create privacy concerns.
58c5fc13 51062+
fe2de317
MT
51063+config GRKERNSEC_RWXMAP_LOG
51064+ bool 'Denied RWX mmap/mprotect logging'
51065+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
51066+ help
51067+ If you say Y here, calls to mmap() and mprotect() with explicit
51068+ usage of PROT_WRITE and PROT_EXEC together will be logged when
51069+ denied by the PAX_MPROTECT feature. If the sysctl option is
51070+ enabled, a sysctl option with name "rwxmap_logging" is created.
6892158b 51071+
fe2de317
MT
51072+config GRKERNSEC_AUDIT_TEXTREL
51073+ bool 'ELF text relocations logging (READ HELP)'
51074+ depends on PAX_MPROTECT
51075+ help
51076+ If you say Y here, text relocations will be logged with the filename
51077+ of the offending library or binary. The purpose of the feature is
51078+ to help Linux distribution developers get rid of libraries and
51079+ binaries that need text relocations which hinder the future progress
51080+ of PaX. Only Linux distribution developers should say Y here, and
51081+ never on a production machine, as this option creates an information
51082+ leak that could aid an attacker in defeating the randomization of
51083+ a single memory region. If the sysctl option is enabled, a sysctl
51084+ option with name "audit_textrel" is created.
58c5fc13 51085+
fe2de317 51086+endmenu
58c5fc13 51087+
fe2de317
MT
51088+menu "Executable Protections"
51089+depends on GRKERNSEC
58c5fc13 51090+
fe2de317
MT
51091+config GRKERNSEC_DMESG
51092+ bool "Dmesg(8) restriction"
51093+ help
51094+ If you say Y here, non-root users will not be able to use dmesg(8)
51095+ to view up to the last 4kb of messages in the kernel's log buffer.
51096+ The kernel's log buffer often contains kernel addresses and other
51097+ identifying information useful to an attacker in fingerprinting a
51098+ system for a targeted exploit.
51099+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
51100+ created.
6892158b 51101+
fe2de317
MT
51102+config GRKERNSEC_HARDEN_PTRACE
51103+ bool "Deter ptrace-based process snooping"
51104+ help
51105+ If you say Y here, TTY sniffers and other malicious monitoring
51106+ programs implemented through ptrace will be defeated. If you
51107+ have been using the RBAC system, this option has already been
51108+ enabled for several years for all users, with the ability to make
51109+ fine-grained exceptions.
58c5fc13 51110+
fe2de317
MT
51111+ This option only affects the ability of non-root users to ptrace
51112+ processes that are not a descendant of the ptracing process.
51113+ This means that strace ./binary and gdb ./binary will still work,
51114+ but attaching to arbitrary processes will not. If the sysctl
51115+ option is enabled, a sysctl option with name "harden_ptrace" is
51116+ created.
58c5fc13 51117+
4c928ab7
MT
51118+config GRKERNSEC_PTRACE_READEXEC
51119+ bool "Require read access to ptrace sensitive binaries"
51120+ help
51121+ If you say Y here, unprivileged users will not be able to ptrace unreadable
51122+ binaries. This option is useful in environments that
51123+ remove the read bits (e.g. file mode 4711) from suid binaries to
51124+ prevent infoleaking of their contents. This option adds
51125+ consistency to the use of that file mode, as the binary could normally
51126+ be read out when run without privileges while ptracing.
51127+
51128+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
51129+ is created.
51130+
51131+config GRKERNSEC_SETXID
51132+ bool "Enforce consistent multithreaded privileges"
5e856224 51133+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
4c928ab7
MT
51134+ help
51135+ If you say Y here, a change from a root uid to a non-root uid
51136+ in a multithreaded application will cause the resulting uids,
51137+ gids, supplementary groups, and capabilities in that thread
51138+ to be propagated to the other threads of the process. In most
51139+ cases this is unnecessary, as glibc will emulate this behavior
51140+ on behalf of the application. Other libcs do not act in the
51141+ same way, allowing the other threads of the process to continue
51142+ running with root privileges. If the sysctl option is enabled,
51143+ a sysctl option with name "consistent_setxid" is created.
51144+
fe2de317
MT
51145+config GRKERNSEC_TPE
51146+ bool "Trusted Path Execution (TPE)"
51147+ help
51148+ If you say Y here, you will be able to choose a gid to add to the
51149+ supplementary groups of users you want to mark as "untrusted."
51150+ These users will not be able to execute any files that are not in
51151+ root-owned directories writable only by root. If the sysctl option
51152+ is enabled, a sysctl option with name "tpe" is created.
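For untrusted users the trust decision reduces to a property of the executable's directory: it must be owned by root and writable only by root. A user-space approximation of that test is sketched below; the function name is invented, and grsecurity applies the real check in the kernel at exec time:

    #include <libgen.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    /* TPE-style check: the directory containing `path` must be owned by
     * root and not writable by group or others. */
    static bool tpe_trusted_path(const char *path)
    {
        char buf[4096];
        struct stat st;

        strncpy(buf, path, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        if (stat(dirname(buf), &st) < 0)
            return false;
        return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
    }

    int main(int argc, char **argv)
    {
        for (int i = 1; i < argc; i++)
            printf("%s: %s\n", argv[i],
                   tpe_trusted_path(argv[i]) ? "trusted path" : "untrusted path");
        return 0;
    }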
58c5fc13 51153+
fe2de317
MT
51154+config GRKERNSEC_TPE_ALL
51155+ bool "Partially restrict all non-root users"
51156+ depends on GRKERNSEC_TPE
51157+ help
51158+ If you say Y here, all non-root users will be covered under
51159+ a weaker TPE restriction. This is separate from, and in addition to,
51160+ the main TPE options that you have selected elsewhere. Thus, if a
51161+ "trusted" GID is chosen, this restriction applies to even that GID.
51162+ Under this restriction, all non-root users will only be allowed to
51163+ execute files in directories they own that are not group or
51164+ world-writable, or in directories owned by root and writable only by
51165+ root. If the sysctl option is enabled, a sysctl option with name
51166+ "tpe_restrict_all" is created.
58c5fc13 51167+
fe2de317
MT
51168+config GRKERNSEC_TPE_INVERT
51169+ bool "Invert GID option"
51170+ depends on GRKERNSEC_TPE
51171+ help
51172+ If you say Y here, the group you specify in the TPE configuration will
51173+ decide what group TPE restrictions will be *disabled* for. This
51174+ option is useful if you want TPE restrictions to be applied to most
51175+ users on the system. If the sysctl option is enabled, a sysctl option
51176+ with name "tpe_invert" is created. Unlike other sysctl options, this
51177+ entry will default to on for backward-compatibility.
6e9df6a3 51178+
fe2de317
MT
51179+config GRKERNSEC_TPE_GID
51180+ int "GID for untrusted users"
51181+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51182+ default 1005
51183+ help
51184+ Setting this GID determines what group TPE restrictions will be
51185+ *enabled* for. If the sysctl option is enabled, a sysctl option
51186+ with name "tpe_gid" is created.
6e9df6a3 51187+
fe2de317
MT
51188+config GRKERNSEC_TPE_GID
51189+ int "GID for trusted users"
51190+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51191+ default 1005
51192+ help
51193+ Setting this GID determines what group TPE restrictions will be
51194+ *disabled* for. If the sysctl option is enabled, a sysctl option
51195+ with name "tpe_gid" is created.
58c5fc13 51196+
fe2de317
MT
51197+endmenu
51198+menu "Network Protections"
51199+depends on GRKERNSEC
58c5fc13 51200+
fe2de317
MT
51201+config GRKERNSEC_RANDNET
51202+ bool "Larger entropy pools"
51203+ help
51204+ If you say Y here, the entropy pools used for many features of Linux
51205+ and grsecurity will be doubled in size. Since several grsecurity
51206+ features use additional randomness, it is recommended that you say Y
51207+ here. Saying Y here has a similar effect as modifying
51208+ /proc/sys/kernel/random/poolsize.
58c5fc13 51209+
fe2de317
MT
51210+config GRKERNSEC_BLACKHOLE
51211+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51212+ depends on NET
51213+ help
51214+ If you say Y here, neither TCP resets nor ICMP
51215+ destination-unreachable packets will be sent in response to packets
51216+ sent to ports for which no associated listening process exists.
51217+ This feature supports both IPV4 and IPV6 and exempts the
51218+ loopback interface from blackholing. Enabling this feature
51219+ makes a host more resilient to DoS attacks and reduces network
51220+ visibility against scanners.
58c5fc13 51221+
fe2de317
MT
51222+ The blackhole feature as-implemented is equivalent to the FreeBSD
51223+ blackhole feature, as it prevents RST responses to all packets, not
51224+ just SYNs. Under most application behavior this causes no
51225+ problems, but applications (like haproxy) may not close certain
51226+ connections in a way that cleanly terminates them on the remote
51227+ end, leaving the remote host in LAST_ACK state. Because of this
51228+ side-effect and to prevent intentional LAST_ACK DoSes, this
51229+ feature also adds automatic mitigation against such attacks.
51230+ The mitigation drastically reduces the amount of time a socket
51231+ can spend in LAST_ACK state. If you're using haproxy and not
51232+ all servers it connects to have this option enabled, consider
51233+ disabling this feature on the haproxy host.
58c5fc13 51234+
fe2de317
MT
51235+ If the sysctl option is enabled, two sysctl options with names
51236+ "ip_blackhole" and "lastack_retries" will be created.
51237+ While "ip_blackhole" takes the standard zero/non-zero on/off
51238+ toggle, "lastack_retries" uses the same kinds of values as
51239+ "tcp_retries1" and "tcp_retries2". The default value of 4
51240+ prevents a socket from lasting more than 45 seconds in LAST_ACK
51241+ state.
58c5fc13 51242+
fe2de317
MT
51243+config GRKERNSEC_SOCKET
51244+ bool "Socket restrictions"
51245+ depends on NET
51246+ help
51247+ If you say Y here, you will be able to choose from several options.
51248+ If you assign a GID on your system and add it to the supplementary
51249+ groups of users you want to restrict socket access to, this patch
51250+ will perform up to three things, based on the option(s) you choose.
58c5fc13 51251+
fe2de317
MT
51252+config GRKERNSEC_SOCKET_ALL
51253+ bool "Deny any sockets to group"
51254+ depends on GRKERNSEC_SOCKET
51255+ help
51256+ If you say Y here, you will be able to choose a GID whose users will
51257+ be unable to connect to other hosts from your machine or run server
51258+ applications from your machine. If the sysctl option is enabled, a
51259+ sysctl option with name "socket_all" is created.
58c5fc13 51260+
fe2de317
MT
51261+config GRKERNSEC_SOCKET_ALL_GID
51262+ int "GID to deny all sockets for"
51263+ depends on GRKERNSEC_SOCKET_ALL
51264+ default 1004
51265+ help
51266+ Here you can choose the GID to disable socket access for. Remember to
51267+ add the users you want socket access disabled for to the GID
51268+ specified here. If the sysctl option is enabled, a sysctl option
51269+ with name "socket_all_gid" is created.
58c5fc13 51270+
fe2de317
MT
51271+config GRKERNSEC_SOCKET_CLIENT
51272+ bool "Deny client sockets to group"
51273+ depends on GRKERNSEC_SOCKET
51274+ help
51275+ If you say Y here, you will be able to choose a GID whose users will
51276+ be unable to connect to other hosts from your machine, but will be
51277+ able to run servers. If this option is enabled, all users in the group
51278+ you specify will have to use passive mode when initiating ftp transfers
51279+ from the shell on your machine. If the sysctl option is enabled, a
51280+ sysctl option with name "socket_client" is created.
58c5fc13 51281+
fe2de317
MT
51282+config GRKERNSEC_SOCKET_CLIENT_GID
51283+ int "GID to deny client sockets for"
51284+ depends on GRKERNSEC_SOCKET_CLIENT
51285+ default 1003
51286+ help
51287+ Here you can choose the GID to disable client socket access for.
51288+ Remember to add the users you want client socket access disabled for to
51289+ the GID specified here. If the sysctl option is enabled, a sysctl
51290+ option with name "socket_client_gid" is created.
58c5fc13 51291+
fe2de317
MT
51292+config GRKERNSEC_SOCKET_SERVER
51293+ bool "Deny server sockets to group"
51294+ depends on GRKERNSEC_SOCKET
51295+ help
51296+ If you say Y here, you will be able to choose a GID whose users will
51297+ be unable to run server applications from your machine. If the sysctl
51298+ option is enabled, a sysctl option with name "socket_server" is created.
58c5fc13 51299+
fe2de317
MT
51300+config GRKERNSEC_SOCKET_SERVER_GID
51301+ int "GID to deny server sockets for"
51302+ depends on GRKERNSEC_SOCKET_SERVER
51303+ default 1002
51304+ help
51305+ Here you can choose the GID to disable server socket access for.
51306+ Remember to add the users you want server socket access disabled for to
51307+ the GID specified here. If the sysctl option is enabled, a sysctl
51308+ option with name "socket_server_gid" is created.
58c5fc13 51309+
fe2de317
MT
51310+endmenu
51311+menu "Sysctl support"
51312+depends on GRKERNSEC && SYSCTL
58c5fc13 51313+
fe2de317
MT
51314+config GRKERNSEC_SYSCTL
51315+ bool "Sysctl support"
51316+ help
51317+ If you say Y here, you will be able to change the options that
51318+ grsecurity runs with at bootup, without having to recompile your
51319+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51320+ to enable (1) or disable (0) various features. All the sysctl entries
51321+ are mutable until the "grsec_lock" entry is set to a non-zero value.
51322+ All features enabled in the kernel configuration are disabled at boot
51323+ if you do not say Y to the "Turn on features by default" option.
51324+ All options should be set at startup, and the grsec_lock entry should
51325+ be set to a non-zero value after all the options are set.
51326+ *THIS IS EXTREMELY IMPORTANT*
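In practice that means writing the desired values into /proc/sys/kernel/grsecurity during startup and setting grsec_lock last, after which the configuration can no longer be changed. A small C helper showing that ordering (the two feature entries written here are only examples taken from this file; on a kernel without grsecurity the files simply do not exist):

    #include <stdio.h>

    /* Write a single value to a grsecurity sysctl entry. */
    static int grsec_set(const char *name, const char *value)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return -1;
        }
        fprintf(f, "%s\n", value);
        return fclose(f);
    }

    int main(void)
    {
        /* Enable the wanted features first ... */
        grsec_set("linking_restrictions", "1");
        grsec_set("fifo_restrictions", "1");
        /* ... and only then lock the configuration, as the help text insists. */
        return grsec_set("grsec_lock", "1") ? 1 : 0;
    }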
58c5fc13 51327+
fe2de317
MT
51328+config GRKERNSEC_SYSCTL_DISTRO
51329+ bool "Extra sysctl support for distro makers (READ HELP)"
51330+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51331+ help
51332+ If you say Y here, additional sysctl options will be created
51333+ for features that affect processes running as root. Therefore,
51334+ it is critical when using this option that the grsec_lock entry be
51335+ enabled after boot. Only distros with prebuilt kernel packages
51336+ with this option enabled that can ensure grsec_lock is enabled
51337+ after boot should use this option.
51338+ *Failure to set grsec_lock after boot makes all grsec features
51339+ this option covers useless*
bc901d79 51340+
fe2de317
MT
51341+ Currently this option creates the following sysctl entries:
51342+ "Disable Privileged I/O": "disable_priv_io"
58c5fc13 51343+
fe2de317
MT
51344+config GRKERNSEC_SYSCTL_ON
51345+ bool "Turn on features by default"
51346+ depends on GRKERNSEC_SYSCTL
51347+ help
51348+ If you say Y here, instead of having all features enabled in the
51349+ kernel configuration disabled at boot time, the features will be
51350+ enabled at boot time. It is recommended you say Y here unless
51351+ there is some reason you would want all sysctl-tunable features to
51352+ be disabled by default. As mentioned elsewhere, it is important
51353+ to enable the grsec_lock entry once you have finished modifying
51354+ the sysctl entries.
58c5fc13 51355+
fe2de317
MT
51356+endmenu
51357+menu "Logging Options"
51358+depends on GRKERNSEC
58c5fc13 51359+
fe2de317
MT
51360+config GRKERNSEC_FLOODTIME
51361+ int "Seconds in between log messages (minimum)"
51362+ default 10
51363+ help
51364+ This option allows you to enforce the number of seconds between
51365+ grsecurity log messages. The default should be suitable for most
51366+ people, however, if you choose to change it, choose a value small enough
51367+ to allow informative logs to be produced, but large enough to
51368+ prevent flooding.
58c5fc13 51369+
fe2de317
MT
51370+config GRKERNSEC_FLOODBURST
51371+ int "Number of messages in a burst (maximum)"
51372+ default 6
51373+ help
51374+ This option allows you to choose the maximum number of messages allowed
51375+ within the flood time interval you chose in a separate option. The
51376+ default should be suitable for most people, however if you find that
51377+ many of your logs are being interpreted as flooding, you may want to
51378+ raise this value.
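Read together, FLOODTIME and FLOODBURST describe a burst limiter: within one FLOODTIME-second interval, at most FLOODBURST messages get through. One reasonable user-space reading of those two knobs, with invented names, is sketched below; the kernel's own bookkeeping may differ in detail:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define FLOODTIME  10   /* seconds per window, as in the default above */
    #define FLOODBURST 6    /* messages allowed inside one window */

    static time_t window_start;
    static unsigned int in_window;

    /* Return true if a log message may be emitted now, false if it should
     * be suppressed as flooding. */
    static bool log_allowed(void)
    {
        time_t now = time(NULL);

        if (now - window_start >= FLOODTIME) {
            window_start = now;     /* new window: reset the counter */
            in_window = 0;
        }
        if (in_window >= FLOODBURST)
            return false;
        in_window++;
        return true;
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            printf("message %d: %s\n", i, log_allowed() ? "logged" : "suppressed");
        return 0;
    }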
58c5fc13 51379+
fe2de317 51380+endmenu
58c5fc13 51381+
fe2de317
MT
51382+endmenu
51383diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51384new file mode 100644
4c928ab7 51385index 0000000..1b9afa9
fe2de317
MT
51386--- /dev/null
51387+++ b/grsecurity/Makefile
4c928ab7 51388@@ -0,0 +1,38 @@
fe2de317
MT
51389+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51390+# during 2001-2009 it has been completely redesigned by Brad Spengler
51391+# into an RBAC system
51392+#
51393+# All code in this directory and various hooks inserted throughout the kernel
51394+# are copyright Brad Spengler - Open Source Security, Inc., and released
51395+# under the GPL v2 or higher
58c5fc13 51396+
4c928ab7
MT
51397+KBUILD_CFLAGS += -Werror
51398+
fe2de317
MT
51399+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51400+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
51401+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58c5fc13 51402+
fe2de317
MT
51403+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51404+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51405+ gracl_learn.o grsec_log.o
51406+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58c5fc13 51407+
fe2de317
MT
51408+ifdef CONFIG_NET
51409+obj-y += grsec_sock.o
51410+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51411+endif
58c5fc13 51412+
fe2de317
MT
51413+ifndef CONFIG_GRKERNSEC
51414+obj-y += grsec_disabled.o
51415+endif
58c5fc13 51416+
fe2de317
MT
51417+ifdef CONFIG_GRKERNSEC_HIDESYM
51418+extra-y := grsec_hidesym.o
51419+$(obj)/grsec_hidesym.o:
51420+ @-chmod -f 500 /boot
51421+ @-chmod -f 500 /lib/modules
51422+ @-chmod -f 500 /lib64/modules
51423+ @-chmod -f 500 /lib32/modules
51424+ @-chmod -f 700 .
51425+ @echo ' grsec: protected kernel image paths'
51426+endif
51427diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51428new file mode 100644
5e856224 51429index 0000000..a6d83f0
fe2de317
MT
51430--- /dev/null
51431+++ b/grsecurity/gracl.c
5e856224 51432@@ -0,0 +1,4193 @@
fe2de317
MT
51433+#include <linux/kernel.h>
51434+#include <linux/module.h>
51435+#include <linux/sched.h>
51436+#include <linux/mm.h>
51437+#include <linux/file.h>
51438+#include <linux/fs.h>
51439+#include <linux/namei.h>
51440+#include <linux/mount.h>
51441+#include <linux/tty.h>
51442+#include <linux/proc_fs.h>
51443+#include <linux/lglock.h>
51444+#include <linux/slab.h>
51445+#include <linux/vmalloc.h>
51446+#include <linux/types.h>
51447+#include <linux/sysctl.h>
51448+#include <linux/netdevice.h>
51449+#include <linux/ptrace.h>
51450+#include <linux/gracl.h>
51451+#include <linux/gralloc.h>
4c928ab7 51452+#include <linux/security.h>
fe2de317
MT
51453+#include <linux/grinternal.h>
51454+#include <linux/pid_namespace.h>
51455+#include <linux/fdtable.h>
51456+#include <linux/percpu.h>
5e856224 51457+#include "../fs/mount.h"
58c5fc13 51458+
fe2de317
MT
51459+#include <asm/uaccess.h>
51460+#include <asm/errno.h>
51461+#include <asm/mman.h>
58c5fc13 51462+
fe2de317
MT
51463+static struct acl_role_db acl_role_set;
51464+static struct name_db name_set;
51465+static struct inodev_db inodev_set;
58c5fc13 51466+
fe2de317
MT
51467+/* for keeping track of userspace pointers used for subjects, so we
51468+ can share references in the kernel as well
51469+*/
58c5fc13 51470+
fe2de317 51471+static struct path real_root;
58c5fc13 51472+
fe2de317 51473+static struct acl_subj_map_db subj_map_set;
58c5fc13 51474+
fe2de317 51475+static struct acl_role_label *default_role;
58c5fc13 51476+
fe2de317 51477+static struct acl_role_label *role_list;
58c5fc13 51478+
fe2de317 51479+static u16 acl_sp_role_value;
58c5fc13 51480+
fe2de317
MT
51481+extern char *gr_shared_page[4];
51482+static DEFINE_MUTEX(gr_dev_mutex);
51483+DEFINE_RWLOCK(gr_inode_lock);
58c5fc13 51484+
fe2de317 51485+struct gr_arg *gr_usermode;
58c5fc13 51486+
fe2de317 51487+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58c5fc13 51488+
fe2de317
MT
51489+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51490+extern void gr_clear_learn_entries(void);
58c5fc13 51491+
fe2de317
MT
51492+#ifdef CONFIG_GRKERNSEC_RESLOG
51493+extern void gr_log_resource(const struct task_struct *task,
51494+ const int res, const unsigned long wanted, const int gt);
51495+#endif
58c5fc13 51496+
fe2de317
MT
51497+unsigned char *gr_system_salt;
51498+unsigned char *gr_system_sum;
58c5fc13 51499+
fe2de317
MT
51500+static struct sprole_pw **acl_special_roles = NULL;
51501+static __u16 num_sprole_pws = 0;
58c5fc13 51502+
fe2de317 51503+static struct acl_role_label *kernel_role = NULL;
58c5fc13 51504+
fe2de317
MT
51505+static unsigned int gr_auth_attempts = 0;
51506+static unsigned long gr_auth_expires = 0UL;
58c5fc13 51507+
fe2de317
MT
51508+#ifdef CONFIG_NET
51509+extern struct vfsmount *sock_mnt;
51510+#endif
58c5fc13 51511+
fe2de317
MT
51512+extern struct vfsmount *pipe_mnt;
51513+extern struct vfsmount *shm_mnt;
51514+#ifdef CONFIG_HUGETLBFS
51515+extern struct vfsmount *hugetlbfs_vfsmount;
51516+#endif
58c5fc13 51517+
fe2de317
MT
51518+static struct acl_object_label *fakefs_obj_rw;
51519+static struct acl_object_label *fakefs_obj_rwx;
58c5fc13 51520+
fe2de317
MT
51521+extern int gr_init_uidset(void);
51522+extern void gr_free_uidset(void);
51523+extern void gr_remove_uid(uid_t uid);
51524+extern int gr_find_uid(uid_t uid);
58c5fc13 51525+
fe2de317 51526+DECLARE_BRLOCK(vfsmount_lock);
58c5fc13 51527+
fe2de317
MT
51528+__inline__ int
51529+gr_acl_is_enabled(void)
57199397 51530+{
fe2de317
MT
51531+ return (gr_status & GR_READY);
51532+}
58c5fc13 51533+
fe2de317
MT
51534+#ifdef CONFIG_BTRFS_FS
51535+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51536+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51537+#endif
58c5fc13 51538+
fe2de317
MT
51539+static inline dev_t __get_dev(const struct dentry *dentry)
51540+{
51541+#ifdef CONFIG_BTRFS_FS
51542+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51543+ return get_btrfs_dev_from_inode(dentry->d_inode);
51544+ else
51545+#endif
51546+ return dentry->d_inode->i_sb->s_dev;
58c5fc13
MT
51547+}
51548+
fe2de317 51549+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58c5fc13 51550+{
fe2de317
MT
51551+ return __get_dev(dentry);
51552+}
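A note on the btrfs special case above (this rationale is inferred from the code, not stated in the patch): btrfs subvolumes all share one superblock, so dentry->d_inode->i_sb->s_dev is the same for every subvolume, while the RBAC tables key subjects and objects by (inode, device) pairs. Asking btrfs for the per-inode device via get_btrfs_dev_from_inode() keeps those keys unique across subvolumes; for every other filesystem the superblock device is sufficient.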
58c5fc13 51553+
fe2de317
MT
51554+static char gr_task_roletype_to_char(struct task_struct *task)
51555+{
51556+ switch (task->role->roletype &
51557+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51558+ GR_ROLE_SPECIAL)) {
51559+ case GR_ROLE_DEFAULT:
51560+ return 'D';
51561+ case GR_ROLE_USER:
51562+ return 'U';
51563+ case GR_ROLE_GROUP:
51564+ return 'G';
51565+ case GR_ROLE_SPECIAL:
51566+ return 'S';
51567+ }
58c5fc13 51568+
fe2de317
MT
51569+ return 'X';
51570+}
ae4e228f 51571+
fe2de317
MT
51572+char gr_roletype_to_char(void)
51573+{
51574+ return gr_task_roletype_to_char(current);
58c5fc13 51575+}
efbe55a5 51576+
fe2de317
MT
51577+__inline__ int
51578+gr_acl_tpe_check(void)
efbe55a5 51579+{
fe2de317
MT
51580+ if (unlikely(!(gr_status & GR_READY)))
51581+ return 0;
51582+ if (current->role->roletype & GR_ROLE_TPE)
51583+ return 1;
51584+ else
51585+ return 0;
51586+}
efbe55a5 51587+
fe2de317
MT
51588+int
51589+gr_handle_rawio(const struct inode *inode)
51590+{
51591+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51592+ if (inode && S_ISBLK(inode->i_mode) &&
51593+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51594+ !capable(CAP_SYS_RAWIO))
51595+ return 1;
51596+#endif
51597+ return 0;
51598+}
efbe55a5 51599+
fe2de317
MT
51600+static int
51601+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51602+{
51603+ if (likely(lena != lenb))
51604+ return 0;
efbe55a5 51605+
fe2de317 51606+ return !memcmp(a, b, lena);
efbe55a5
MT
51607+}
51608+
fe2de317 51609+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
efbe55a5 51610+{
fe2de317
MT
51611+ *buflen -= namelen;
51612+ if (*buflen < 0)
51613+ return -ENAMETOOLONG;
51614+ *buffer -= namelen;
51615+ memcpy(*buffer, str, namelen);
51616+ return 0;
51617+}
efbe55a5 51618+
fe2de317
MT
51619+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51620+{
51621+ return prepend(buffer, buflen, name->name, name->len);
efbe55a5 51622+}
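The prepend() helpers above build a pathname right to left: the write cursor starts at the end of the buffer and each component is copied in front of what is already there, so the walk from leaf dentry to root needs no reversal pass. A minimal userspace sketch of the same idea (the component list and buffer size are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    /* copy str in front of *buffer, moving the cursor backwards */
    static int prepend(char **buffer, int *buflen, const char *str, int namelen)
    {
            *buflen -= namelen;
            if (*buflen < 0)
                    return -1;      /* buffer too small: path too long */
            *buffer -= namelen;
            memcpy(*buffer, str, namelen);
            return 0;
    }

    int main(void)
    {
            char buf[64];
            char *res = buf + sizeof(buf);
            int buflen = sizeof(buf);
            /* components as seen while walking from the leaf up to the root */
            const char *components[] = { "passwd", "etc" };

            prepend(&res, &buflen, "\0", 1);
            for (int i = 0; i < 2; i++) {
                    prepend(&res, &buflen, components[i], strlen(components[i]));
                    prepend(&res, &buflen, "/", 1);
            }
            printf("%s\n", res);    /* prints /etc/passwd */
            return 0;
    }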
fe2de317
MT
51623+
51624+static int prepend_path(const struct path *path, struct path *root,
51625+ char **buffer, int *buflen)
efbe55a5 51626+{
fe2de317
MT
51627+ struct dentry *dentry = path->dentry;
51628+ struct vfsmount *vfsmnt = path->mnt;
5e856224 51629+ struct mount *mnt = real_mount(vfsmnt);
fe2de317
MT
51630+ bool slash = false;
51631+ int error = 0;
efbe55a5 51632+
fe2de317
MT
51633+ while (dentry != root->dentry || vfsmnt != root->mnt) {
51634+ struct dentry * parent;
efbe55a5 51635+
fe2de317
MT
51636+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51637+ /* Global root? */
5e856224 51638+ if (!mnt_has_parent(mnt)) {
fe2de317
MT
51639+ goto out;
51640+ }
5e856224
MT
51641+ dentry = mnt->mnt_mountpoint;
51642+ mnt = mnt->mnt_parent;
51643+ vfsmnt = &mnt->mnt;
fe2de317
MT
51644+ continue;
51645+ }
51646+ parent = dentry->d_parent;
51647+ prefetch(parent);
51648+ spin_lock(&dentry->d_lock);
51649+ error = prepend_name(buffer, buflen, &dentry->d_name);
51650+ spin_unlock(&dentry->d_lock);
51651+ if (!error)
51652+ error = prepend(buffer, buflen, "/", 1);
51653+ if (error)
51654+ break;
efbe55a5 51655+
fe2de317
MT
51656+ slash = true;
51657+ dentry = parent;
51658+ }
efbe55a5 51659+
fe2de317
MT
51660+out:
51661+ if (!error && !slash)
51662+ error = prepend(buffer, buflen, "/", 1);
efbe55a5 51663+
fe2de317
MT
51664+ return error;
51665+}
efbe55a5 51666+
fe2de317 51667+/* this must be called with vfsmount_lock and rename_lock held */
efbe55a5 51668+
fe2de317
MT
51669+static char *__our_d_path(const struct path *path, struct path *root,
51670+ char *buf, int buflen)
51671+{
51672+ char *res = buf + buflen;
51673+ int error;
efbe55a5 51674+
fe2de317
MT
51675+ prepend(&res, &buflen, "\0", 1);
51676+ error = prepend_path(path, root, &res, &buflen);
51677+ if (error)
51678+ return ERR_PTR(error);
51679+
51680+ return res;
efbe55a5
MT
51681+}
51682+
fe2de317
MT
51683+static char *
51684+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
efbe55a5 51685+{
fe2de317 51686+ char *retval;
efbe55a5 51687+
fe2de317
MT
51688+ retval = __our_d_path(path, root, buf, buflen);
51689+ if (unlikely(IS_ERR(retval)))
51690+ retval = strcpy(buf, "<path too long>");
51691+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51692+ retval[1] = '\0';
efbe55a5 51693+
fe2de317 51694+ return retval;
efbe55a5
MT
51695+}
51696+
fe2de317
MT
51697+static char *
51698+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51699+ char *buf, int buflen)
efbe55a5 51700+{
fe2de317
MT
51701+ struct path path;
51702+ char *res;
efbe55a5 51703+
fe2de317
MT
51704+ path.dentry = (struct dentry *)dentry;
51705+ path.mnt = (struct vfsmount *)vfsmnt;
efbe55a5 51706+
fe2de317
MT
51707+ /* we can use real_root.dentry, real_root.mnt, because this is only called
51708+ by the RBAC system */
51709+ res = gen_full_path(&path, &real_root, buf, buflen);
efbe55a5 51710+
fe2de317 51711+ return res;
efbe55a5
MT
51712+}
51713+
fe2de317
MT
51714+static char *
51715+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51716+ char *buf, int buflen)
efbe55a5 51717+{
fe2de317
MT
51718+ char *res;
51719+ struct path path;
51720+ struct path root;
51721+ struct task_struct *reaper = &init_task;
51722+
51723+ path.dentry = (struct dentry *)dentry;
51724+ path.mnt = (struct vfsmount *)vfsmnt;
51725+
51726+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51727+ get_fs_root(reaper->fs, &root);
51728+
51729+ write_seqlock(&rename_lock);
51730+ br_read_lock(vfsmount_lock);
51731+ res = gen_full_path(&path, &root, buf, buflen);
51732+ br_read_unlock(vfsmount_lock);
51733+ write_sequnlock(&rename_lock);
51734+
51735+ path_put(&root);
51736+ return res;
51737+}
51738+
51739+static char *
51740+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51741+{
51742+ char *ret;
51743+ write_seqlock(&rename_lock);
51744+ br_read_lock(vfsmount_lock);
51745+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51746+ PAGE_SIZE);
51747+ br_read_unlock(vfsmount_lock);
51748+ write_sequnlock(&rename_lock);
51749+ return ret;
51750+}
51751+
51752+static char *
51753+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51754+{
51755+ char *ret;
51756+ char *buf;
51757+ int buflen;
51758+
51759+ write_seqlock(&rename_lock);
51760+ br_read_lock(vfsmount_lock);
51761+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51762+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51763+ buflen = (int)(ret - buf);
51764+ if (buflen >= 5)
51765+ prepend(&ret, &buflen, "/proc", 5);
51766+ else
51767+ ret = strcpy(buf, "<path too long>");
51768+ br_read_unlock(vfsmount_lock);
51769+ write_sequnlock(&rename_lock);
51770+ return ret;
51771+}
51772+
51773+char *
51774+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51775+{
51776+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51777+ PAGE_SIZE);
51778+}
51779+
51780+char *
51781+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51782+{
51783+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51784+ PAGE_SIZE);
51785+}
51786+
51787+char *
51788+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51789+{
51790+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51791+ PAGE_SIZE);
51792+}
51793+
51794+char *
51795+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51796+{
51797+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51798+ PAGE_SIZE);
51799+}
51800+
51801+char *
51802+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51803+{
51804+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51805+ PAGE_SIZE);
51806+}
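The wrappers above format paths into gr_shared_page[0] through gr_shared_page[3], which the per_cpu_ptr() calls show to be per-CPU buffers of PAGE_SIZE (their allocation lives elsewhere in the patch). Using a distinct page per wrapper presumably lets a single audit message format several different paths in a row without a later call overwriting an earlier result; that reading is an inference from the code rather than something the patch states.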
51807+
51808+__inline__ __u32
51809+to_gr_audit(const __u32 reqmode)
51810+{
51811+ /* mask off the audit flags, shift the remaining permission flags up
51812+ to their audit counterparts, and add the special case of append
51813+ auditing if we're requesting write */
51814+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51815+}
51816+
51817+struct acl_subject_label *
51818+lookup_subject_map(const struct acl_subject_label *userp)
51819+{
51820+ unsigned int index = shash(userp, subj_map_set.s_size);
51821+ struct subject_map *match;
51822+
51823+ match = subj_map_set.s_hash[index];
51824+
51825+ while (match && match->user != userp)
51826+ match = match->next;
51827+
51828+ if (match != NULL)
51829+ return match->kernel;
51830+ else
51831+ return NULL;
51832+}
51833+
51834+static void
51835+insert_subj_map_entry(struct subject_map *subjmap)
51836+{
51837+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51838+ struct subject_map **curr;
51839+
51840+ subjmap->prev = NULL;
51841+
51842+ curr = &subj_map_set.s_hash[index];
51843+ if (*curr != NULL)
51844+ (*curr)->prev = subjmap;
51845+
51846+ subjmap->next = *curr;
51847+ *curr = subjmap;
51848+
51849+ return;
51850+}
51851+
51852+static struct acl_role_label *
51853+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51854+ const gid_t gid)
51855+{
51856+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51857+ struct acl_role_label *match;
51858+ struct role_allowed_ip *ipp;
51859+ unsigned int x;
51860+ u32 curr_ip = task->signal->curr_ip;
51861+
51862+ task->signal->saved_ip = curr_ip;
51863+
51864+ match = acl_role_set.r_hash[index];
51865+
51866+ while (match) {
51867+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51868+ for (x = 0; x < match->domain_child_num; x++) {
51869+ if (match->domain_children[x] == uid)
51870+ goto found;
51871+ }
51872+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51873+ break;
51874+ match = match->next;
51875+ }
51876+found:
51877+ if (match == NULL) {
51878+ try_group:
51879+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51880+ match = acl_role_set.r_hash[index];
51881+
51882+ while (match) {
51883+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51884+ for (x = 0; x < match->domain_child_num; x++) {
51885+ if (match->domain_children[x] == gid)
51886+ goto found2;
51887+ }
51888+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51889+ break;
51890+ match = match->next;
51891+ }
51892+found2:
51893+ if (match == NULL)
51894+ match = default_role;
51895+ if (match->allowed_ips == NULL)
51896+ return match;
51897+ else {
51898+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51899+ if (likely
51900+ ((ntohl(curr_ip) & ipp->netmask) ==
51901+ (ntohl(ipp->addr) & ipp->netmask)))
51902+ return match;
51903+ }
51904+ match = default_role;
51905+ }
51906+ } else if (match->allowed_ips == NULL) {
51907+ return match;
51908+ } else {
51909+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51910+ if (likely
51911+ ((ntohl(curr_ip) & ipp->netmask) ==
51912+ (ntohl(ipp->addr) & ipp->netmask)))
51913+ return match;
51914+ }
51915+ goto try_group;
51916+ }
51917+
51918+ return match;
51919+}
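Read as a whole, the lookup above resolves a task's role in this order: first a user role (or a domain listing the task's uid); if that role restricts logins by source IP and the task's recorded IP does not match, it falls back to a group role (or a domain listing the gid), and finally to the default role, applying the same allowed-IP check at each step. This summary is derived from the control flow above, not from separate documentation.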
51920+
51921+struct acl_subject_label *
51922+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51923+ const struct acl_role_label *role)
51924+{
51925+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51926+ struct acl_subject_label *match;
51927+
51928+ match = role->subj_hash[index];
51929+
51930+ while (match && (match->inode != ino || match->device != dev ||
51931+ (match->mode & GR_DELETED))) {
51932+ match = match->next;
51933+ }
51934+
51935+ if (match && !(match->mode & GR_DELETED))
51936+ return match;
51937+ else
51938+ return NULL;
51939+}
51940+
51941+struct acl_subject_label *
51942+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51943+ const struct acl_role_label *role)
51944+{
51945+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51946+ struct acl_subject_label *match;
51947+
51948+ match = role->subj_hash[index];
51949+
51950+ while (match && (match->inode != ino || match->device != dev ||
51951+ !(match->mode & GR_DELETED))) {
51952+ match = match->next;
51953+ }
51954+
51955+ if (match && (match->mode & GR_DELETED))
51956+ return match;
51957+ else
51958+ return NULL;
51959+}
51960+
51961+static struct acl_object_label *
51962+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51963+ const struct acl_subject_label *subj)
51964+{
51965+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51966+ struct acl_object_label *match;
51967+
51968+ match = subj->obj_hash[index];
51969+
51970+ while (match && (match->inode != ino || match->device != dev ||
51971+ (match->mode & GR_DELETED))) {
51972+ match = match->next;
51973+ }
51974+
51975+ if (match && !(match->mode & GR_DELETED))
51976+ return match;
51977+ else
51978+ return NULL;
51979+}
51980+
51981+static struct acl_object_label *
51982+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51983+ const struct acl_subject_label *subj)
51984+{
51985+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51986+ struct acl_object_label *match;
51987+
51988+ match = subj->obj_hash[index];
51989+
51990+ while (match && (match->inode != ino || match->device != dev ||
51991+ !(match->mode & GR_DELETED))) {
51992+ match = match->next;
51993+ }
51994+
51995+ if (match && (match->mode & GR_DELETED))
51996+ return match;
51997+
51998+ match = subj->obj_hash[index];
51999+
52000+ while (match && (match->inode != ino || match->device != dev ||
52001+ (match->mode & GR_DELETED))) {
52002+ match = match->next;
52003+ }
52004+
52005+ if (match && !(match->mode & GR_DELETED))
52006+ return match;
52007+ else
52008+ return NULL;
52009+}
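The two passes above deliberately prefer an object label whose mode has GR_DELETED set: when something is about to be created at a path the policy remembers as deleted, the label recorded for that deleted path wins, and only if no such label exists does a live (non-deleted) label apply. The intent is inferred from the lookup order; the patch itself does not spell it out.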
52010+
52011+static struct name_entry *
52012+lookup_name_entry(const char *name)
52013+{
52014+ unsigned int len = strlen(name);
52015+ unsigned int key = full_name_hash(name, len);
52016+ unsigned int index = key % name_set.n_size;
52017+ struct name_entry *match;
52018+
52019+ match = name_set.n_hash[index];
52020+
52021+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
52022+ match = match->next;
52023+
52024+ return match;
52025+}
52026+
52027+static struct name_entry *
52028+lookup_name_entry_create(const char *name)
52029+{
52030+ unsigned int len = strlen(name);
52031+ unsigned int key = full_name_hash(name, len);
52032+ unsigned int index = key % name_set.n_size;
52033+ struct name_entry *match;
52034+
52035+ match = name_set.n_hash[index];
52036+
52037+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52038+ !match->deleted))
52039+ match = match->next;
52040+
52041+ if (match && match->deleted)
52042+ return match;
52043+
52044+ match = name_set.n_hash[index];
52045+
52046+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52047+ match->deleted))
52048+ match = match->next;
52049+
52050+ if (match && !match->deleted)
52051+ return match;
52052+ else
52053+ return NULL;
52054+}
52055+
52056+static struct inodev_entry *
52057+lookup_inodev_entry(const ino_t ino, const dev_t dev)
52058+{
52059+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
52060+ struct inodev_entry *match;
52061+
52062+ match = inodev_set.i_hash[index];
52063+
52064+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
52065+ match = match->next;
52066+
52067+ return match;
52068+}
52069+
52070+static void
52071+insert_inodev_entry(struct inodev_entry *entry)
52072+{
52073+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
52074+ inodev_set.i_size);
52075+ struct inodev_entry **curr;
52076+
52077+ entry->prev = NULL;
52078+
52079+ curr = &inodev_set.i_hash[index];
52080+ if (*curr != NULL)
52081+ (*curr)->prev = entry;
52082+
52083+ entry->next = *curr;
52084+ *curr = entry;
52085+
52086+ return;
52087+}
52088+
52089+static void
52090+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52091+{
52092+ unsigned int index =
52093+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52094+ struct acl_role_label **curr;
4c928ab7 52095+ struct acl_role_label *tmp, *tmp2;
fe2de317
MT
52096+
52097+ curr = &acl_role_set.r_hash[index];
52098+
4c928ab7
MT
52099+ /* simple case, slot is empty, just set it to our role */
52100+ if (*curr == NULL) {
52101+ *curr = role;
52102+ } else {
52103+ /* example:
52104+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
52105+ 2 -> 3
52106+ */
52107+ /* first check to see if we can already be reached via this slot */
52108+ tmp = *curr;
52109+ while (tmp && tmp != role)
fe2de317 52110+ tmp = tmp->next;
4c928ab7
MT
52111+ if (tmp == role) {
52112+ /* we don't need to add ourselves to this slot's chain */
52113+ return;
52114+ }
52115+ /* we need to add ourselves to this chain, two cases */
52116+ if (role->next == NULL) {
52117+ /* simple case, append the current chain to our role */
52118+ role->next = *curr;
52119+ *curr = role;
52120+ } else {
52121+ /* 1 -> 2 -> 3 -> 4
52122+ 2 -> 3 -> 4
52123+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
52124+ */
52125+ /* trickier case: walk our role's chain until we find
52126+ the role for the start of the current slot's chain */
52127+ tmp = role;
52128+ tmp2 = *curr;
52129+ while (tmp->next && tmp->next != tmp2)
52130+ tmp = tmp->next;
52131+ if (tmp->next == tmp2) {
52132+ /* from example above, we found 3, so just
52133+ replace this slot's chain with ours */
52134+ *curr = role;
52135+ } else {
52136+ /* we didn't find a subset of our role's chain
52137+ in the current slot's chain, so append their
52138+ chain to ours, and set us as the first role in
52139+ the slot's chain
52140+
52141+ we could fold this case with the case above,
52142+ but making it explicit for clarity
52143+ */
52144+ tmp->next = tmp2;
52145+ *curr = role;
52146+ }
52147+ }
52148+ }
fe2de317
MT
52149+
52150+ return;
52151+}
52152+
52153+static void
52154+insert_acl_role_label(struct acl_role_label *role)
52155+{
52156+ int i;
52157+
52158+ if (role_list == NULL) {
52159+ role_list = role;
52160+ role->prev = NULL;
52161+ } else {
52162+ role->prev = role_list;
52163+ role_list = role;
52164+ }
52165+
52166+ /* used for hash chains */
52167+ role->next = NULL;
52168+
52169+ if (role->roletype & GR_ROLE_DOMAIN) {
52170+ for (i = 0; i < role->domain_child_num; i++)
52171+ __insert_acl_role_label(role, role->domain_children[i]);
52172+ } else
52173+ __insert_acl_role_label(role, role->uidgid);
52174+}
52175+
52176+static int
52177+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52178+{
52179+ struct name_entry **curr, *nentry;
52180+ struct inodev_entry *ientry;
52181+ unsigned int len = strlen(name);
52182+ unsigned int key = full_name_hash(name, len);
52183+ unsigned int index = key % name_set.n_size;
52184+
52185+ curr = &name_set.n_hash[index];
52186+
52187+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52188+ curr = &((*curr)->next);
52189+
52190+ if (*curr != NULL)
52191+ return 1;
52192+
52193+ nentry = acl_alloc(sizeof (struct name_entry));
52194+ if (nentry == NULL)
52195+ return 0;
52196+ ientry = acl_alloc(sizeof (struct inodev_entry));
52197+ if (ientry == NULL)
52198+ return 0;
52199+ ientry->nentry = nentry;
52200+
52201+ nentry->key = key;
52202+ nentry->name = name;
52203+ nentry->inode = inode;
52204+ nentry->device = device;
52205+ nentry->len = len;
52206+ nentry->deleted = deleted;
52207+
52208+ nentry->prev = NULL;
52209+ curr = &name_set.n_hash[index];
52210+ if (*curr != NULL)
52211+ (*curr)->prev = nentry;
52212+ nentry->next = *curr;
52213+ *curr = nentry;
52214+
52215+ /* insert us into the table searchable by inode/dev */
52216+ insert_inodev_entry(ientry);
52217+
52218+ return 1;
52219+}
52220+
52221+static void
52222+insert_acl_obj_label(struct acl_object_label *obj,
52223+ struct acl_subject_label *subj)
52224+{
52225+ unsigned int index =
52226+ fhash(obj->inode, obj->device, subj->obj_hash_size);
52227+ struct acl_object_label **curr;
52228+
52229+
52230+ obj->prev = NULL;
52231+
52232+ curr = &subj->obj_hash[index];
52233+ if (*curr != NULL)
52234+ (*curr)->prev = obj;
52235+
52236+ obj->next = *curr;
52237+ *curr = obj;
52238+
52239+ return;
52240+}
52241+
52242+static void
52243+insert_acl_subj_label(struct acl_subject_label *obj,
52244+ struct acl_role_label *role)
52245+{
52246+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52247+ struct acl_subject_label **curr;
52248+
52249+ obj->prev = NULL;
52250+
52251+ curr = &role->subj_hash[index];
52252+ if (*curr != NULL)
52253+ (*curr)->prev = obj;
52254+
52255+ obj->next = *curr;
52256+ *curr = obj;
52257+
52258+ return;
52259+}
52260+
52261+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52262+
52263+static void *
52264+create_table(__u32 * len, int elementsize)
52265+{
52266+ unsigned int table_sizes[] = {
efbe55a5
MT
52267+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52268+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52269+ 4194301, 8388593, 16777213, 33554393, 67108859
52270+ };
52271+ void *newtable = NULL;
52272+ unsigned int pwr = 0;
52273+
52274+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52275+ table_sizes[pwr] <= *len)
52276+ pwr++;
52277+
52278+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52279+ return newtable;
52280+
52281+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52282+ newtable =
52283+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52284+ else
52285+ newtable = vmalloc(table_sizes[pwr] * elementsize);
52286+
52287+ *len = table_sizes[pwr];
52288+
52289+ return newtable;
52290+}
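The size ladder above is a list of primes, chosen so the chained hash tables end up with roughly one entry per bucket (load factor lambda = entries / buckets ~ 1, per the comment). A small userspace sketch of just the size-selection step, with a shortened prime list for illustration (the allocation and the kmalloc/vmalloc split at PAGE_SIZE are left out):

    #include <stdio.h>

    /* pick the smallest prime bucket count strictly larger than the requested
       element count, so the expected chain length stays near 1 */
    static unsigned int pick_table_size(unsigned int len)
    {
            static const unsigned int sizes[] = {
                    7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
            };
            unsigned int pwr = 0;

            while (pwr < (sizeof(sizes) / sizeof(sizes[0])) - 1 && sizes[pwr] <= len)
                    pwr++;
            if (sizes[pwr] <= len)
                    return 0;       /* request larger than the table supports */
            return sizes[pwr];
    }

    int main(void)
    {
            printf("%u\n", pick_table_size(1000));  /* prints 1021 */
            return 0;
    }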
52291+
52292+static int
52293+init_variables(const struct gr_arg *arg)
52294+{
52295+ struct task_struct *reaper = &init_task;
52296+ unsigned int stacksize;
58c5fc13
MT
52297+
52298+ subj_map_set.s_size = arg->role_db.num_subjects;
52299+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52300+ name_set.n_size = arg->role_db.num_objects;
52301+ inodev_set.i_size = arg->role_db.num_objects;
52302+
52303+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
52304+ !name_set.n_size || !inodev_set.i_size)
52305+ return 1;
52306+
52307+ if (!gr_init_uidset())
52308+ return 1;
52309+
52310+ /* set up the stack that holds allocation info */
52311+
52312+ stacksize = arg->role_db.num_pointers + 5;
52313+
52314+ if (!acl_alloc_stack_init(stacksize))
52315+ return 1;
52316+
52317+ /* grab reference for the real root dentry and vfsmount */
ea610fa8 52318+ get_fs_root(reaper->fs, &real_root);
58c5fc13 52319+
16454cff
MT
52320+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52321+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52322+#endif
52323+
15a11c5b
MT
52324+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52325+ if (fakefs_obj_rw == NULL)
52326+ return 1;
52327+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52328+
52329+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52330+ if (fakefs_obj_rwx == NULL)
58c5fc13 52331+ return 1;
15a11c5b 52332+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58c5fc13
MT
52333+
52334+ subj_map_set.s_hash =
52335+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52336+ acl_role_set.r_hash =
52337+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52338+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52339+ inodev_set.i_hash =
52340+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52341+
52342+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52343+ !name_set.n_hash || !inodev_set.i_hash)
52344+ return 1;
52345+
52346+ memset(subj_map_set.s_hash, 0,
52347+ sizeof(struct subject_map *) * subj_map_set.s_size);
52348+ memset(acl_role_set.r_hash, 0,
52349+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
52350+ memset(name_set.n_hash, 0,
52351+ sizeof (struct name_entry *) * name_set.n_size);
52352+ memset(inodev_set.i_hash, 0,
52353+ sizeof (struct inodev_entry *) * inodev_set.i_size);
52354+
52355+ return 0;
52356+}
52357+
52358+/* free information not needed after startup;
52359+ currently this is the user->kernel pointer mappings for subjects
52360+*/
52361+
52362+static void
52363+free_init_variables(void)
52364+{
52365+ __u32 i;
52366+
52367+ if (subj_map_set.s_hash) {
52368+ for (i = 0; i < subj_map_set.s_size; i++) {
52369+ if (subj_map_set.s_hash[i]) {
52370+ kfree(subj_map_set.s_hash[i]);
52371+ subj_map_set.s_hash[i] = NULL;
52372+ }
52373+ }
52374+
52375+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52376+ PAGE_SIZE)
52377+ kfree(subj_map_set.s_hash);
52378+ else
52379+ vfree(subj_map_set.s_hash);
52380+ }
52381+
52382+ return;
52383+}
52384+
52385+static void
52386+free_variables(void)
52387+{
52388+ struct acl_subject_label *s;
52389+ struct acl_role_label *r;
52390+ struct task_struct *task, *task2;
ae4e228f 52391+ unsigned int x;
58c5fc13
MT
52392+
52393+ gr_clear_learn_entries();
52394+
52395+ read_lock(&tasklist_lock);
52396+ do_each_thread(task2, task) {
52397+ task->acl_sp_role = 0;
52398+ task->acl_role_id = 0;
52399+ task->acl = NULL;
52400+ task->role = NULL;
52401+ } while_each_thread(task2, task);
52402+ read_unlock(&tasklist_lock);
52403+
52404+ /* release the reference to the real root dentry and vfsmount */
6892158b 52405+ path_put(&real_root);
4c928ab7 52406+ memset(&real_root, 0, sizeof(real_root));
58c5fc13
MT
52407+
52408+ /* free all object hash tables */
52409+
ae4e228f 52410+ FOR_EACH_ROLE_START(r)
58c5fc13 52411+ if (r->subj_hash == NULL)
ae4e228f 52412+ goto next_role;
58c5fc13
MT
52413+ FOR_EACH_SUBJECT_START(r, s, x)
52414+ if (s->obj_hash == NULL)
52415+ break;
52416+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52417+ kfree(s->obj_hash);
52418+ else
52419+ vfree(s->obj_hash);
52420+ FOR_EACH_SUBJECT_END(s, x)
52421+ FOR_EACH_NESTED_SUBJECT_START(r, s)
52422+ if (s->obj_hash == NULL)
52423+ break;
52424+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52425+ kfree(s->obj_hash);
52426+ else
52427+ vfree(s->obj_hash);
52428+ FOR_EACH_NESTED_SUBJECT_END(s)
52429+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52430+ kfree(r->subj_hash);
52431+ else
52432+ vfree(r->subj_hash);
52433+ r->subj_hash = NULL;
ae4e228f
MT
52434+next_role:
52435+ FOR_EACH_ROLE_END(r)
58c5fc13
MT
52436+
52437+ acl_free_all();
52438+
52439+ if (acl_role_set.r_hash) {
52440+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52441+ PAGE_SIZE)
52442+ kfree(acl_role_set.r_hash);
52443+ else
52444+ vfree(acl_role_set.r_hash);
52445+ }
52446+ if (name_set.n_hash) {
52447+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
52448+ PAGE_SIZE)
52449+ kfree(name_set.n_hash);
52450+ else
52451+ vfree(name_set.n_hash);
52452+ }
52453+
52454+ if (inodev_set.i_hash) {
52455+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52456+ PAGE_SIZE)
52457+ kfree(inodev_set.i_hash);
52458+ else
52459+ vfree(inodev_set.i_hash);
52460+ }
52461+
52462+ gr_free_uidset();
52463+
52464+ memset(&name_set, 0, sizeof (struct name_db));
52465+ memset(&inodev_set, 0, sizeof (struct inodev_db));
52466+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52467+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52468+
52469+ default_role = NULL;
4c928ab7 52470+ kernel_role = NULL;
ae4e228f 52471+ role_list = NULL;
58c5fc13
MT
52472+
52473+ return;
52474+}
52475+
52476+static __u32
52477+count_user_objs(struct acl_object_label *userp)
52478+{
52479+ struct acl_object_label o_tmp;
52480+ __u32 num = 0;
52481+
52482+ while (userp) {
52483+ if (copy_from_user(&o_tmp, userp,
52484+ sizeof (struct acl_object_label)))
52485+ break;
52486+
52487+ userp = o_tmp.prev;
52488+ num++;
52489+ }
52490+
52491+ return num;
52492+}
52493+
52494+static struct acl_subject_label *
52495+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52496+
52497+static int
52498+copy_user_glob(struct acl_object_label *obj)
52499+{
52500+ struct acl_object_label *g_tmp, **guser;
52501+ unsigned int len;
52502+ char *tmp;
52503+
52504+ if (obj->globbed == NULL)
52505+ return 0;
52506+
52507+ guser = &obj->globbed;
52508+ while (*guser) {
52509+ g_tmp = (struct acl_object_label *)
52510+ acl_alloc(sizeof (struct acl_object_label));
52511+ if (g_tmp == NULL)
52512+ return -ENOMEM;
52513+
52514+ if (copy_from_user(g_tmp, *guser,
52515+ sizeof (struct acl_object_label)))
52516+ return -EFAULT;
52517+
52518+ len = strnlen_user(g_tmp->filename, PATH_MAX);
52519+
52520+ if (!len || len >= PATH_MAX)
52521+ return -EINVAL;
52522+
52523+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52524+ return -ENOMEM;
52525+
52526+ if (copy_from_user(tmp, g_tmp->filename, len))
52527+ return -EFAULT;
52528+ tmp[len-1] = '\0';
52529+ g_tmp->filename = tmp;
52530+
52531+ *guser = g_tmp;
52532+ guser = &(g_tmp->next);
52533+ }
52534+
52535+ return 0;
52536+}
52537+
52538+static int
52539+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52540+ struct acl_role_label *role)
52541+{
52542+ struct acl_object_label *o_tmp;
52543+ unsigned int len;
52544+ int ret;
52545+ char *tmp;
52546+
52547+ while (userp) {
52548+ if ((o_tmp = (struct acl_object_label *)
52549+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
52550+ return -ENOMEM;
52551+
52552+ if (copy_from_user(o_tmp, userp,
52553+ sizeof (struct acl_object_label)))
52554+ return -EFAULT;
52555+
52556+ userp = o_tmp->prev;
52557+
52558+ len = strnlen_user(o_tmp->filename, PATH_MAX);
52559+
52560+ if (!len || len >= PATH_MAX)
52561+ return -EINVAL;
52562+
52563+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52564+ return -ENOMEM;
52565+
52566+ if (copy_from_user(tmp, o_tmp->filename, len))
52567+ return -EFAULT;
52568+ tmp[len-1] = '\0';
52569+ o_tmp->filename = tmp;
52570+
52571+ insert_acl_obj_label(o_tmp, subj);
52572+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52573+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52574+ return -ENOMEM;
52575+
52576+ ret = copy_user_glob(o_tmp);
52577+ if (ret)
52578+ return ret;
52579+
52580+ if (o_tmp->nested) {
52581+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52582+ if (IS_ERR(o_tmp->nested))
52583+ return PTR_ERR(o_tmp->nested);
52584+
52585+ /* insert into nested subject list */
52586+ o_tmp->nested->next = role->hash->first;
52587+ role->hash->first = o_tmp->nested;
52588+ }
52589+ }
52590+
52591+ return 0;
52592+}
52593+
52594+static __u32
52595+count_user_subjs(struct acl_subject_label *userp)
52596+{
52597+ struct acl_subject_label s_tmp;
52598+ __u32 num = 0;
52599+
52600+ while (userp) {
52601+ if (copy_from_user(&s_tmp, userp,
52602+ sizeof (struct acl_subject_label)))
52603+ break;
52604+
52605+ userp = s_tmp.prev;
52606+ /* do not count nested subjects against this count, since
52607+ they are not included in the hash table, but are
52608+ attached to objects. We have already counted
52609+ the subjects in userspace for the allocation
52610+ stack
52611+ */
52612+ if (!(s_tmp.mode & GR_NESTED))
52613+ num++;
52614+ }
52615+
52616+ return num;
52617+}
52618+
52619+static int
52620+copy_user_allowedips(struct acl_role_label *rolep)
52621+{
52622+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52623+
52624+ ruserip = rolep->allowed_ips;
52625+
52626+ while (ruserip) {
52627+ rlast = rtmp;
52628+
52629+ if ((rtmp = (struct role_allowed_ip *)
52630+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52631+ return -ENOMEM;
52632+
52633+ if (copy_from_user(rtmp, ruserip,
52634+ sizeof (struct role_allowed_ip)))
52635+ return -EFAULT;
52636+
52637+ ruserip = rtmp->prev;
52638+
52639+ if (!rlast) {
52640+ rtmp->prev = NULL;
52641+ rolep->allowed_ips = rtmp;
52642+ } else {
52643+ rlast->next = rtmp;
52644+ rtmp->prev = rlast;
52645+ }
52646+
52647+ if (!ruserip)
52648+ rtmp->next = NULL;
52649+ }
52650+
52651+ return 0;
52652+}
52653+
52654+static int
52655+copy_user_transitions(struct acl_role_label *rolep)
52656+{
52657+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
52658+
52659+ unsigned int len;
52660+ char *tmp;
52661+
52662+ rusertp = rolep->transitions;
52663+
52664+ while (rusertp) {
52665+ rlast = rtmp;
52666+
52667+ if ((rtmp = (struct role_transition *)
52668+ acl_alloc(sizeof (struct role_transition))) == NULL)
52669+ return -ENOMEM;
52670+
52671+ if (copy_from_user(rtmp, rusertp,
52672+ sizeof (struct role_transition)))
52673+ return -EFAULT;
52674+
52675+ rusertp = rtmp->prev;
52676+
52677+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52678+
52679+ if (!len || len >= GR_SPROLE_LEN)
52680+ return -EINVAL;
52681+
52682+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52683+ return -ENOMEM;
52684+
52685+ if (copy_from_user(tmp, rtmp->rolename, len))
52686+ return -EFAULT;
52687+ tmp[len-1] = '\0';
52688+ rtmp->rolename = tmp;
52689+
52690+ if (!rlast) {
52691+ rtmp->prev = NULL;
52692+ rolep->transitions = rtmp;
52693+ } else {
52694+ rlast->next = rtmp;
52695+ rtmp->prev = rlast;
52696+ }
52697+
52698+ if (!rusertp)
52699+ rtmp->next = NULL;
52700+ }
52701+
52702+ return 0;
52703+}
52704+
52705+static struct acl_subject_label *
52706+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52707+{
52708+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52709+ unsigned int len;
52710+ char *tmp;
52711+ __u32 num_objs;
52712+ struct acl_ip_label **i_tmp, *i_utmp2;
52713+ struct gr_hash_struct ghash;
52714+ struct subject_map *subjmap;
52715+ unsigned int i_num;
52716+ int err;
52717+
52718+ s_tmp = lookup_subject_map(userp);
52719+
52720+ /* we've already copied this subject into the kernel, just return
52721+ the reference to it, and don't copy it over again
52722+ */
52723+ if (s_tmp)
52724+ return(s_tmp);
52725+
52726+ if ((s_tmp = (struct acl_subject_label *)
52727+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52728+ return ERR_PTR(-ENOMEM);
52729+
52730+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52731+ if (subjmap == NULL)
52732+ return ERR_PTR(-ENOMEM);
52733+
52734+ subjmap->user = userp;
52735+ subjmap->kernel = s_tmp;
52736+ insert_subj_map_entry(subjmap);
52737+
52738+ if (copy_from_user(s_tmp, userp,
52739+ sizeof (struct acl_subject_label)))
52740+ return ERR_PTR(-EFAULT);
52741+
52742+ len = strnlen_user(s_tmp->filename, PATH_MAX);
52743+
52744+ if (!len || len >= PATH_MAX)
52745+ return ERR_PTR(-EINVAL);
52746+
52747+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52748+ return ERR_PTR(-ENOMEM);
52749+
52750+ if (copy_from_user(tmp, s_tmp->filename, len))
52751+ return ERR_PTR(-EFAULT);
52752+ tmp[len-1] = '\0';
52753+ s_tmp->filename = tmp;
52754+
52755+ if (!strcmp(s_tmp->filename, "/"))
52756+ role->root_label = s_tmp;
52757+
52758+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52759+ return ERR_PTR(-EFAULT);
52760+
52761+ /* copy user and group transition tables */
52762+
52763+ if (s_tmp->user_trans_num) {
52764+ uid_t *uidlist;
52765+
52766+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52767+ if (uidlist == NULL)
52768+ return ERR_PTR(-ENOMEM);
52769+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52770+ return ERR_PTR(-EFAULT);
52771+
52772+ s_tmp->user_transitions = uidlist;
52773+ }
52774+
52775+ if (s_tmp->group_trans_num) {
52776+ gid_t *gidlist;
52777+
52778+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52779+ if (gidlist == NULL)
52780+ return ERR_PTR(-ENOMEM);
52781+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52782+ return ERR_PTR(-EFAULT);
52783+
52784+ s_tmp->group_transitions = gidlist;
52785+ }
52786+
52787+ /* set up object hash table */
52788+ num_objs = count_user_objs(ghash.first);
52789+
52790+ s_tmp->obj_hash_size = num_objs;
52791+ s_tmp->obj_hash =
52792+ (struct acl_object_label **)
52793+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52794+
52795+ if (!s_tmp->obj_hash)
52796+ return ERR_PTR(-ENOMEM);
52797+
52798+ memset(s_tmp->obj_hash, 0,
52799+ s_tmp->obj_hash_size *
52800+ sizeof (struct acl_object_label *));
52801+
52802+ /* add in objects */
52803+ err = copy_user_objs(ghash.first, s_tmp, role);
52804+
52805+ if (err)
52806+ return ERR_PTR(err);
52807+
52808+ /* set pointer for parent subject */
52809+ if (s_tmp->parent_subject) {
52810+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52811+
52812+ if (IS_ERR(s_tmp2))
52813+ return s_tmp2;
52814+
52815+ s_tmp->parent_subject = s_tmp2;
52816+ }
52817+
52818+ /* add in ip acls */
52819+
52820+ if (!s_tmp->ip_num) {
52821+ s_tmp->ips = NULL;
52822+ goto insert;
52823+ }
52824+
52825+ i_tmp =
52826+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52827+ sizeof (struct acl_ip_label *));
52828+
52829+ if (!i_tmp)
52830+ return ERR_PTR(-ENOMEM);
52831+
52832+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52833+ *(i_tmp + i_num) =
52834+ (struct acl_ip_label *)
52835+ acl_alloc(sizeof (struct acl_ip_label));
52836+ if (!*(i_tmp + i_num))
52837+ return ERR_PTR(-ENOMEM);
52838+
52839+ if (copy_from_user
52840+ (&i_utmp2, s_tmp->ips + i_num,
52841+ sizeof (struct acl_ip_label *)))
52842+ return ERR_PTR(-EFAULT);
52843+
52844+ if (copy_from_user
52845+ (*(i_tmp + i_num), i_utmp2,
52846+ sizeof (struct acl_ip_label)))
52847+ return ERR_PTR(-EFAULT);
52848+
52849+ if ((*(i_tmp + i_num))->iface == NULL)
52850+ continue;
52851+
52852+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52853+ if (!len || len >= IFNAMSIZ)
52854+ return ERR_PTR(-EINVAL);
52855+ tmp = acl_alloc(len);
52856+ if (tmp == NULL)
52857+ return ERR_PTR(-ENOMEM);
52858+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52859+ return ERR_PTR(-EFAULT);
52860+ (*(i_tmp + i_num))->iface = tmp;
52861+ }
52862+
52863+ s_tmp->ips = i_tmp;
52864+
52865+insert:
52866+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52867+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52868+ return ERR_PTR(-ENOMEM);
52869+
52870+ return s_tmp;
52871+}
52872+
52873+static int
52874+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52875+{
52876+ struct acl_subject_label s_pre;
52877+ struct acl_subject_label * ret;
52878+ int err;
52879+
52880+ while (userp) {
52881+ if (copy_from_user(&s_pre, userp,
52882+ sizeof (struct acl_subject_label)))
52883+ return -EFAULT;
52884+
52885+ /* do not add nested subjects here, add
52886+ while parsing objects
52887+ */
52888+
52889+ if (s_pre.mode & GR_NESTED) {
52890+ userp = s_pre.prev;
52891+ continue;
52892+ }
52893+
52894+ ret = do_copy_user_subj(userp, role);
52895+
52896+ err = PTR_ERR(ret);
52897+ if (IS_ERR(ret))
52898+ return err;
52899+
52900+ insert_acl_subj_label(ret, role);
52901+
52902+ userp = s_pre.prev;
52903+ }
52904+
52905+ return 0;
52906+}
52907+
52908+static int
52909+copy_user_acl(struct gr_arg *arg)
52910+{
52911+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52912+ struct sprole_pw *sptmp;
52913+ struct gr_hash_struct *ghash;
52914+ uid_t *domainlist;
52915+ unsigned int r_num;
52916+ unsigned int len;
52917+ char *tmp;
52918+ int err = 0;
52919+ __u16 i;
52920+ __u32 num_subjs;
52921+
52922+ /* we need a default and kernel role */
52923+ if (arg->role_db.num_roles < 2)
52924+ return -EINVAL;
52925+
52926+ /* copy special role authentication info from userspace */
52927+
52928+ num_sprole_pws = arg->num_sprole_pws;
52929+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52930+
4c928ab7
MT
52931+ if (!acl_special_roles && num_sprole_pws)
52932+ return -ENOMEM;
58c5fc13
MT
52933+
52934+ for (i = 0; i < num_sprole_pws; i++) {
52935+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
4c928ab7
MT
52936+ if (!sptmp)
52937+ return -ENOMEM;
58c5fc13 52938+ if (copy_from_user(sptmp, arg->sprole_pws + i,
4c928ab7
MT
52939+ sizeof (struct sprole_pw)))
52940+ return -EFAULT;
58c5fc13 52941+
4c928ab7 52942+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58c5fc13 52943+
4c928ab7
MT
52944+ if (!len || len >= GR_SPROLE_LEN)
52945+ return -EINVAL;
58c5fc13 52946+
4c928ab7
MT
52947+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52948+ return -ENOMEM;
52949+
52950+ if (copy_from_user(tmp, sptmp->rolename, len))
52951+ return -EFAULT;
58c5fc13 52952+
58c5fc13 52953+ tmp[len-1] = '\0';
16454cff 52954+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58c5fc13
MT
52955+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52956+#endif
52957+ sptmp->rolename = tmp;
52958+ acl_special_roles[i] = sptmp;
52959+ }
52960+
52961+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52962+
52963+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52964+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52965+
4c928ab7
MT
52966+ if (!r_tmp)
52967+ return -ENOMEM;
58c5fc13
MT
52968+
52969+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
4c928ab7
MT
52970+ sizeof (struct acl_role_label *)))
52971+ return -EFAULT;
58c5fc13
MT
52972+
52973+ if (copy_from_user(r_tmp, r_utmp2,
4c928ab7
MT
52974+ sizeof (struct acl_role_label)))
52975+ return -EFAULT;
58c5fc13
MT
52976+
52977+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52978+
4c928ab7
MT
52979+ if (!len || len >= GR_SPROLE_LEN)
52980+ return -EINVAL;
52981+
52982+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52983+ return -ENOMEM;
52984+
52985+ if (copy_from_user(tmp, r_tmp->rolename, len))
52986+ return -EFAULT;
58c5fc13 52987+
58c5fc13
MT
52988+ tmp[len-1] = '\0';
52989+ r_tmp->rolename = tmp;
52990+
52991+ if (!strcmp(r_tmp->rolename, "default")
52992+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52993+ default_role = r_tmp;
52994+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52995+ kernel_role = r_tmp;
52996+ }
52997+
4c928ab7
MT
52998+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52999+ return -ENOMEM;
53000+
53001+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
53002+ return -EFAULT;
58c5fc13
MT
53003+
53004+ r_tmp->hash = ghash;
53005+
53006+ num_subjs = count_user_subjs(r_tmp->hash->first);
53007+
53008+ r_tmp->subj_hash_size = num_subjs;
53009+ r_tmp->subj_hash =
53010+ (struct acl_subject_label **)
53011+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
53012+
4c928ab7
MT
53013+ if (!r_tmp->subj_hash)
53014+ return -ENOMEM;
58c5fc13
MT
53015+
53016+ err = copy_user_allowedips(r_tmp);
53017+ if (err)
4c928ab7 53018+ return err;
58c5fc13
MT
53019+
53020+ /* copy domain info */
53021+ if (r_tmp->domain_children != NULL) {
53022+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
4c928ab7
MT
53023+ if (domainlist == NULL)
53024+ return -ENOMEM;
53025+
53026+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
53027+ return -EFAULT;
53028+
58c5fc13
MT
53029+ r_tmp->domain_children = domainlist;
53030+ }
53031+
53032+ err = copy_user_transitions(r_tmp);
53033+ if (err)
4c928ab7 53034+ return err;
58c5fc13
MT
53035+
53036+ memset(r_tmp->subj_hash, 0,
53037+ r_tmp->subj_hash_size *
53038+ sizeof (struct acl_subject_label *));
53039+
53040+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
53041+
53042+ if (err)
4c928ab7 53043+ return err;
58c5fc13
MT
53044+
53045+ /* set nested subject list to null */
53046+ r_tmp->hash->first = NULL;
53047+
53048+ insert_acl_role_label(r_tmp);
53049+ }
53050+
4c928ab7
MT
53051+ if (default_role == NULL || kernel_role == NULL)
53052+ return -EINVAL;
58c5fc13 53053+
4c928ab7 53054+ return err;
58c5fc13
MT
53055+}
53056+
53057+static int
53058+gracl_init(struct gr_arg *args)
53059+{
53060+ int error = 0;
53061+
53062+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
53063+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
53064+
53065+ if (init_variables(args)) {
53066+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
53067+ error = -ENOMEM;
53068+ free_variables();
53069+ goto out;
53070+ }
53071+
53072+ error = copy_user_acl(args);
53073+ free_init_variables();
53074+ if (error) {
53075+ free_variables();
53076+ goto out;
53077+ }
53078+
53079+ if ((error = gr_set_acls(0))) {
53080+ free_variables();
53081+ goto out;
53082+ }
53083+
ae4e228f 53084+ pax_open_kernel();
58c5fc13 53085+ gr_status |= GR_READY;
ae4e228f 53086+ pax_close_kernel();
58c5fc13
MT
53087+
53088+ out:
53089+ return error;
53090+}
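In sequence, gracl_init() above copies the admin-supplied salt and password hash, builds the in-kernel tables (init_variables() plus copy_user_acl()), discards the user-to-kernel subject map with free_init_variables(), applies the new policy to already-running tasks via gr_set_acls(0) (defined later in this file), and only then sets GR_READY; the flag write is bracketed by pax_open_kernel()/pax_close_kernel() because gr_status was declared __read_only earlier in this file.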
53091+
53092+/* derived from glibc fnmatch() 0: match, 1: no match*/
53093+
53094+static int
53095+glob_match(const char *p, const char *n)
53096+{
53097+ char c;
53098+
53099+ while ((c = *p++) != '\0') {
53100+ switch (c) {
53101+ case '?':
53102+ if (*n == '\0')
53103+ return 1;
53104+ else if (*n == '/')
53105+ return 1;
53106+ break;
53107+ case '\\':
53108+ if (*n != c)
53109+ return 1;
53110+ break;
53111+ case '*':
53112+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
53113+ if (*n == '/')
53114+ return 1;
53115+ else if (c == '?') {
53116+ if (*n == '\0')
53117+ return 1;
53118+ else
53119+ ++n;
53120+ }
53121+ }
53122+ if (c == '\0') {
53123+ return 0;
53124+ } else {
53125+ const char *endp;
53126+
53127+ if ((endp = strchr(n, '/')) == NULL)
53128+ endp = n + strlen(n);
53129+
53130+ if (c == '[') {
53131+ for (--p; n < endp; ++n)
53132+ if (!glob_match(p, n))
53133+ return 0;
53134+ } else if (c == '/') {
53135+ while (*n != '\0' && *n != '/')
53136+ ++n;
53137+ if (*n == '/' && !glob_match(p, n + 1))
53138+ return 0;
53139+ } else {
53140+ for (--p; n < endp; ++n)
53141+ if (*n == c && !glob_match(p, n))
53142+ return 0;
53143+ }
53144+
53145+ return 1;
53146+ }
53147+ case '[':
53148+ {
53149+ int not;
53150+ char cold;
53151+
53152+ if (*n == '\0' || *n == '/')
53153+ return 1;
53154+
53155+ not = (*p == '!' || *p == '^');
53156+ if (not)
53157+ ++p;
53158+
53159+ c = *p++;
53160+ for (;;) {
53161+ unsigned char fn = (unsigned char)*n;
53162+
53163+ if (c == '\0')
53164+ return 1;
53165+ else {
53166+ if (c == fn)
53167+ goto matched;
53168+ cold = c;
53169+ c = *p++;
53170+
53171+ if (c == '-' && *p != ']') {
53172+ unsigned char cend = *p++;
53173+
53174+ if (cend == '\0')
53175+ return 1;
53176+
53177+ if (cold <= fn && fn <= cend)
53178+ goto matched;
53179+
53180+ c = *p++;
53181+ }
53182+ }
53183+
53184+ if (c == ']')
53185+ break;
53186+ }
53187+ if (!not)
53188+ return 1;
53189+ break;
53190+ matched:
53191+ while (c != ']') {
53192+ if (c == '\0')
53193+ return 1;
53194+
53195+ c = *p++;
53196+ }
53197+ if (not)
53198+ return 1;
53199+ }
53200+ break;
53201+ default:
53202+ if (c != *n)
53203+ return 1;
53204+ }
53205+
53206+ ++n;
53207+ }
53208+
53209+ if (*n == '\0')
53210+ return 0;
53211+
53212+ if (*n == '/')
53213+ return 0;
53214+
53215+ return 1;
53216+}
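Since glob_match() returns 0 on a match (mirroring fnmatch(), per the comment above it), a few invented example calls may help; note the last one, where a pattern that names a directory also matches paths beneath it, because a leftover "/" in the name counts as a match once the pattern is exhausted:

    /* illustrative only -- assumes the static glob_match() above is in scope */
    static void glob_match_examples(void)
    {
            int a = glob_match("/home/*/bin", "/home/alice/bin");         /* 0: match */
            int b = glob_match("/home/?lice", "/home/alice");             /* 0: '?' matches one non-'/' char */
            int c = glob_match("/home/*/bin", "/home/alice/sbin");        /* 1: no match */
            int d = glob_match("/home/alice", "/home/alice/.ssh/id_rsa"); /* 0: rule covers the subtree */
            (void)a; (void)b; (void)c; (void)d;
    }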
53217+
53218+static struct acl_object_label *
53219+chk_glob_label(struct acl_object_label *globbed,
4c928ab7 53220+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58c5fc13
MT
53221+{
53222+ struct acl_object_label *tmp;
53223+
53224+ if (*path == NULL)
53225+ *path = gr_to_filename_nolock(dentry, mnt);
53226+
53227+ tmp = globbed;
53228+
53229+ while (tmp) {
53230+ if (!glob_match(tmp->filename, *path))
53231+ return tmp;
53232+ tmp = tmp->next;
53233+ }
53234+
53235+ return NULL;
53236+}
53237+
53238+static struct acl_object_label *
53239+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53240+ const ino_t curr_ino, const dev_t curr_dev,
53241+ const struct acl_subject_label *subj, char **path, const int checkglob)
53242+{
53243+ struct acl_subject_label *tmpsubj;
53244+ struct acl_object_label *retval;
53245+ struct acl_object_label *retval2;
53246+
53247+ tmpsubj = (struct acl_subject_label *) subj;
53248+ read_lock(&gr_inode_lock);
53249+ do {
53250+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53251+ if (retval) {
53252+ if (checkglob && retval->globbed) {
4c928ab7 53253+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58c5fc13
MT
53254+ if (retval2)
53255+ retval = retval2;
53256+ }
53257+ break;
53258+ }
53259+ } while ((tmpsubj = tmpsubj->parent_subject));
53260+ read_unlock(&gr_inode_lock);
53261+
53262+ return retval;
53263+}
53264+
53265+static __inline__ struct acl_object_label *
53266+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
16454cff 53267+ struct dentry *curr_dentry,
58c5fc13
MT
53268+ const struct acl_subject_label *subj, char **path, const int checkglob)
53269+{
bc901d79 53270+ int newglob = checkglob;
16454cff
MT
53271+ ino_t inode;
53272+ dev_t device;
bc901d79
MT
53273+
53274+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
53275+ as we don't want a / * rule to match instead of the / object.
53276+ don't do this for create lookups that call this function though, since they're looking up
53277+ on the parent and thus need globbing checks on all paths
53278+ */
53279+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53280+ newglob = GR_NO_GLOB;
53281+
16454cff
MT
53282+ spin_lock(&curr_dentry->d_lock);
53283+ inode = curr_dentry->d_inode->i_ino;
53284+ device = __get_dev(curr_dentry);
53285+ spin_unlock(&curr_dentry->d_lock);
53286+
53287+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
58c5fc13
MT
53288+}
53289+
53290+static struct acl_object_label *
53291+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53292+ const struct acl_subject_label *subj, char *path, const int checkglob)
53293+{
53294+ struct dentry *dentry = (struct dentry *) l_dentry;
53295+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
5e856224 53296+ struct mount *real_mnt = real_mount(mnt);
58c5fc13 53297+ struct acl_object_label *retval;
16454cff 53298+ struct dentry *parent;
58c5fc13 53299+
16454cff 53300+ write_seqlock(&rename_lock);
bc901d79 53301+ br_read_lock(vfsmount_lock);
58c5fc13 53302+
15a11c5b
MT
53303+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53304+#ifdef CONFIG_NET
53305+ mnt == sock_mnt ||
53306+#endif
df50ba0c 53307+#ifdef CONFIG_HUGETLBFS
71d190be 53308+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
df50ba0c 53309+#endif
58c5fc13
MT
53310+ /* ignore Eric Biederman */
53311+ IS_PRIVATE(l_dentry->d_inode))) {
15a11c5b 53312+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58c5fc13
MT
53313+ goto out;
53314+ }
53315+
53316+ for (;;) {
6892158b 53317+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58c5fc13
MT
53318+ break;
53319+
53320+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
5e856224 53321+ if (!mnt_has_parent(real_mnt))
58c5fc13
MT
53322+ break;
53323+
53324+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53325+ if (retval != NULL)
53326+ goto out;
53327+
5e856224
MT
53328+ dentry = real_mnt->mnt_mountpoint;
53329+ real_mnt = real_mnt->mnt_parent;
53330+ mnt = &real_mnt->mnt;
58c5fc13
MT
53331+ continue;
53332+ }
53333+
16454cff 53334+ parent = dentry->d_parent;
58c5fc13
MT
53335+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53336+ if (retval != NULL)
53337+ goto out;
53338+
16454cff 53339+ dentry = parent;
58c5fc13
MT
53340+ }
53341+
53342+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53343+
16454cff 53344+ /* real_root is pinned so we don't have to hold a reference */
58c5fc13 53345+ if (retval == NULL)
6892158b 53346+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
58c5fc13 53347+out:
bc901d79 53348+ br_read_unlock(vfsmount_lock);
16454cff 53349+ write_sequnlock(&rename_lock);
bc901d79
MT
53350+
53351+ BUG_ON(retval == NULL);
53352+
58c5fc13
MT
53353+ return retval;
53354+}
53355+
53356+static __inline__ struct acl_object_label *
53357+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53358+ const struct acl_subject_label *subj)
53359+{
53360+ char *path = NULL;
bc901d79 53361+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58c5fc13
MT
53362+}
53363+
53364+static __inline__ struct acl_object_label *
53365+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53366+ const struct acl_subject_label *subj)
53367+{
53368+ char *path = NULL;
bc901d79 53369+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58c5fc13
MT
53370+}
53371+
53372+static __inline__ struct acl_object_label *
53373+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53374+ const struct acl_subject_label *subj, char *path)
53375+{
bc901d79 53376+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58c5fc13
MT
53377+}
53378+
53379+static struct acl_subject_label *
53380+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53381+ const struct acl_role_label *role)
53382+{
53383+ struct dentry *dentry = (struct dentry *) l_dentry;
53384+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
5e856224 53385+ struct mount *real_mnt = real_mount(mnt);
58c5fc13 53386+ struct acl_subject_label *retval;
16454cff 53387+ struct dentry *parent;
58c5fc13 53388+
16454cff 53389+ write_seqlock(&rename_lock);
bc901d79 53390+ br_read_lock(vfsmount_lock);
58c5fc13
MT
53391+
53392+ for (;;) {
6892158b 53393+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58c5fc13
MT
53394+ break;
53395+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
5e856224 53396+ if (!mnt_has_parent(real_mnt))
58c5fc13
MT
53397+ break;
53398+
16454cff 53399+ spin_lock(&dentry->d_lock);
58c5fc13
MT
53400+ read_lock(&gr_inode_lock);
53401+ retval =
53402+ lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 53403+ __get_dev(dentry), role);
58c5fc13 53404+ read_unlock(&gr_inode_lock);
16454cff 53405+ spin_unlock(&dentry->d_lock);
58c5fc13
MT
53406+ if (retval != NULL)
53407+ goto out;
53408+
5e856224
MT
53409+ dentry = real_mnt->mnt_mountpoint;
53410+ real_mnt = real_mnt->mnt_parent;
53411+ mnt = &real_mnt->mnt;
58c5fc13
MT
53412+ continue;
53413+ }
53414+
16454cff 53415+ spin_lock(&dentry->d_lock);
58c5fc13
MT
53416+ read_lock(&gr_inode_lock);
53417+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 53418+ __get_dev(dentry), role);
58c5fc13 53419+ read_unlock(&gr_inode_lock);
16454cff
MT
53420+ parent = dentry->d_parent;
53421+ spin_unlock(&dentry->d_lock);
53422+
58c5fc13
MT
53423+ if (retval != NULL)
53424+ goto out;
53425+
16454cff 53426+ dentry = parent;
58c5fc13
MT
53427+ }
53428+
16454cff 53429+ spin_lock(&dentry->d_lock);
58c5fc13
MT
53430+ read_lock(&gr_inode_lock);
53431+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 53432+ __get_dev(dentry), role);
58c5fc13 53433+ read_unlock(&gr_inode_lock);
16454cff 53434+ spin_unlock(&dentry->d_lock);
58c5fc13
MT
53435+
53436+ if (unlikely(retval == NULL)) {
16454cff 53437+ /* real_root is pinned, we don't need to hold a reference */
58c5fc13 53438+ read_lock(&gr_inode_lock);
6892158b 53439+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
16454cff 53440+ __get_dev(real_root.dentry), role);
58c5fc13
MT
53441+ read_unlock(&gr_inode_lock);
53442+ }
53443+out:
bc901d79 53444+ br_read_unlock(vfsmount_lock);
16454cff 53445+ write_sequnlock(&rename_lock);
58c5fc13 53446+
bc901d79
MT
53447+ BUG_ON(retval == NULL);
53448+
58c5fc13
MT
53449+ return retval;
53450+}
53451+
53452+static void
53453+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53454+{
53455+ struct task_struct *task = current;
53456+ const struct cred *cred = current_cred();
53457+
53458+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53459+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53460+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 53461+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58c5fc13
MT
53462+
53463+ return;
53464+}
53465+
53466+static void
53467+gr_log_learn_sysctl(const char *path, const __u32 mode)
53468+{
53469+ struct task_struct *task = current;
53470+ const struct cred *cred = current_cred();
53471+
53472+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53473+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53474+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 53475+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58c5fc13
MT
53476+
53477+ return;
53478+}
53479+
53480+static void
53481+gr_log_learn_id_change(const char type, const unsigned int real,
53482+ const unsigned int effective, const unsigned int fs)
53483+{
53484+ struct task_struct *task = current;
53485+ const struct cred *cred = current_cred();
53486+
53487+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53488+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53489+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 53490+ type, real, effective, fs, &task->signal->saved_ip);
58c5fc13
MT
53491+
53492+ return;
53493+}
53494+
53495+__u32
58c5fc13
MT
53496+gr_search_file(const struct dentry * dentry, const __u32 mode,
53497+ const struct vfsmount * mnt)
53498+{
53499+ __u32 retval = mode;
53500+ struct acl_subject_label *curracl;
53501+ struct acl_object_label *currobj;
53502+
53503+ if (unlikely(!(gr_status & GR_READY)))
53504+ return (mode & ~GR_AUDITS);
53505+
53506+ curracl = current->acl;
53507+
53508+ currobj = chk_obj_label(dentry, mnt, curracl);
53509+ retval = currobj->mode & mode;
53510+
16454cff
MT
53511+ /* if we're opening a specified transfer file for writing
53512+ (e.g. /dev/initctl), then transfer our role to init
53513+ */
53514+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53515+ current->role->roletype & GR_ROLE_PERSIST)) {
53516+ struct task_struct *task = init_pid_ns.child_reaper;
53517+
53518+ if (task->role != current->role) {
53519+ task->acl_sp_role = 0;
53520+ task->acl_role_id = current->acl_role_id;
53521+ task->role = current->role;
53522+ rcu_read_lock();
53523+ read_lock(&grsec_exec_file_lock);
53524+ gr_apply_subject_to_task(task);
53525+ read_unlock(&grsec_exec_file_lock);
53526+ rcu_read_unlock();
53527+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53528+ }
53529+ }
53530+
58c5fc13
MT
53531+ if (unlikely
53532+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53533+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53534+ __u32 new_mode = mode;
53535+
53536+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53537+
53538+ retval = new_mode;
53539+
53540+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53541+ new_mode |= GR_INHERIT;
53542+
53543+ if (!(mode & GR_NOLEARN))
53544+ gr_log_learn(dentry, mnt, new_mode);
53545+ }
53546+
53547+ return retval;
53548+}
53549+
6e9df6a3
MT
53550+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53551+ const struct dentry *parent,
53552+ const struct vfsmount *mnt)
58c5fc13
MT
53553+{
53554+ struct name_entry *match;
53555+ struct acl_object_label *matchpo;
53556+ struct acl_subject_label *curracl;
53557+ char *path;
58c5fc13
MT
53558+
53559+ if (unlikely(!(gr_status & GR_READY)))
6e9df6a3 53560+ return NULL;
58c5fc13
MT
53561+
53562+ preempt_disable();
53563+ path = gr_to_filename_rbac(new_dentry, mnt);
53564+ match = lookup_name_entry_create(path);
53565+
58c5fc13
MT
53566+ curracl = current->acl;
53567+
6e9df6a3
MT
53568+ if (match) {
53569+ read_lock(&gr_inode_lock);
53570+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53571+ read_unlock(&gr_inode_lock);
58c5fc13 53572+
6e9df6a3 53573+ if (matchpo) {
58c5fc13 53574+ preempt_enable();
6e9df6a3 53575+ return matchpo;
58c5fc13 53576+ }
58c5fc13
MT
53577+ }
53578+
6e9df6a3 53579+ // lookup parent
58c5fc13
MT
53580+
53581+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
6e9df6a3
MT
53582+
53583+ preempt_enable();
53584+ return matchpo;
53585+}
53586+
53587+__u32
53588+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53589+ const struct vfsmount * mnt, const __u32 mode)
53590+{
53591+ struct acl_object_label *matchpo;
53592+ __u32 retval;
53593+
53594+ if (unlikely(!(gr_status & GR_READY)))
53595+ return (mode & ~GR_AUDITS);
53596+
53597+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
53598+
58c5fc13
MT
53599+ retval = matchpo->mode & mode;
53600+
53601+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
6e9df6a3 53602+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58c5fc13
MT
53603+ __u32 new_mode = mode;
53604+
53605+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53606+
53607+ gr_log_learn(new_dentry, mnt, new_mode);
58c5fc13
MT
53608+ return new_mode;
53609+ }
53610+
58c5fc13
MT
53611+ return retval;
53612+}
53613+
6e9df6a3
MT
53614+__u32
53615+gr_check_link(const struct dentry * new_dentry,
53616+ const struct dentry * parent_dentry,
53617+ const struct vfsmount * parent_mnt,
53618+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53619+{
53620+ struct acl_object_label *obj;
53621+ __u32 oldmode, newmode;
53622+ __u32 needmode;
53623+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53624+ GR_DELETE | GR_INHERIT;
53625+
53626+ if (unlikely(!(gr_status & GR_READY)))
53627+ return (GR_CREATE | GR_LINK);
53628+
53629+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53630+ oldmode = obj->mode;
53631+
53632+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53633+ newmode = obj->mode;
53634+
53635+ needmode = newmode & checkmodes;
53636+
53637+ // old name for hardlink must have at least the permissions of the new name
53638+ if ((oldmode & needmode) != needmode)
53639+ goto bad;
53640+
53641+ // if old name had restrictions/auditing, make sure the new name does as well
53642+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53643+
53644+ // don't allow hardlinking of suid/sgid files without permission
53645+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53646+ needmode |= GR_SETID;
53647+
53648+ if ((newmode & needmode) != needmode)
53649+ goto bad;
53650+
53651+ // enforce minimum permissions
53652+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53653+ return newmode;
53654+bad:
53655+ needmode = oldmode;
53656+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53657+ needmode |= GR_SETID;
53658+
53659+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53660+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53661+ return (GR_CREATE | GR_LINK);
53662+ } else if (newmode & GR_SUPPRESS)
53663+ return GR_SUPPRESS;
53664+ else
53665+ return 0;
53666+}
53667+
58c5fc13
MT
53668+int
53669+gr_check_hidden_task(const struct task_struct *task)
53670+{
53671+ if (unlikely(!(gr_status & GR_READY)))
53672+ return 0;
53673+
53674+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53675+ return 1;
53676+
53677+ return 0;
53678+}
53679+
53680+int
53681+gr_check_protected_task(const struct task_struct *task)
53682+{
53683+ if (unlikely(!(gr_status & GR_READY) || !task))
53684+ return 0;
53685+
53686+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53687+ task->acl != current->acl)
53688+ return 1;
53689+
53690+ return 0;
53691+}
53692+
57199397
MT
53693+int
53694+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53695+{
53696+ struct task_struct *p;
53697+ int ret = 0;
53698+
53699+ if (unlikely(!(gr_status & GR_READY) || !pid))
53700+ return ret;
53701+
53702+ read_lock(&tasklist_lock);
53703+ do_each_pid_task(pid, type, p) {
53704+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53705+ p->acl != current->acl) {
53706+ ret = 1;
53707+ goto out;
53708+ }
53709+ } while_each_pid_task(pid, type, p);
53710+out:
53711+ read_unlock(&tasklist_lock);
53712+
53713+ return ret;
53714+}
53715+
58c5fc13
MT
53716+void
53717+gr_copy_label(struct task_struct *tsk)
53718+{
4c928ab7 53719+ /* plain copying of fields is already done by dup_task_struct */
58c5fc13
MT
53720+ tsk->signal->used_accept = 0;
53721+ tsk->acl_sp_role = 0;
4c928ab7
MT
53722+ //tsk->acl_role_id = current->acl_role_id;
53723+ //tsk->acl = current->acl;
53724+ //tsk->role = current->role;
58c5fc13 53725+ tsk->signal->curr_ip = current->signal->curr_ip;
bc901d79 53726+ tsk->signal->saved_ip = current->signal->saved_ip;
58c5fc13
MT
53727+ if (current->exec_file)
53728+ get_file(current->exec_file);
4c928ab7
MT
53729+ //tsk->exec_file = current->exec_file;
53730+ //tsk->is_writable = current->is_writable;
bc901d79 53731+ if (unlikely(current->signal->used_accept)) {
58c5fc13 53732+ current->signal->curr_ip = 0;
bc901d79
MT
53733+ current->signal->saved_ip = 0;
53734+ }
58c5fc13
MT
53735+
53736+ return;
53737+}
53738+
53739+static void
53740+gr_set_proc_res(struct task_struct *task)
53741+{
53742+ struct acl_subject_label *proc;
53743+ unsigned short i;
53744+
53745+ proc = task->acl;
53746+
53747+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53748+ return;
53749+
53750+ for (i = 0; i < RLIM_NLIMITS; i++) {
53751+ if (!(proc->resmask & (1 << i)))
53752+ continue;
53753+
53754+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53755+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53756+ }
53757+
53758+ return;
53759+}
53760+
66a7e928
MT
53761+extern int __gr_process_user_ban(struct user_struct *user);
53762+
58c5fc13
MT
53763+int
53764+gr_check_user_change(int real, int effective, int fs)
53765+{
53766+ unsigned int i;
53767+ __u16 num;
53768+ uid_t *uidlist;
53769+ int curuid;
53770+ int realok = 0;
53771+ int effectiveok = 0;
53772+ int fsok = 0;
53773+
66a7e928
MT
53774+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53775+ struct user_struct *user;
53776+
53777+ if (real == -1)
53778+ goto skipit;
53779+
53780+ user = find_user(real);
53781+ if (user == NULL)
53782+ goto skipit;
53783+
53784+ if (__gr_process_user_ban(user)) {
53785+ /* for find_user */
53786+ free_uid(user);
53787+ return 1;
53788+ }
53789+
53790+ /* for find_user */
53791+ free_uid(user);
53792+
53793+skipit:
53794+#endif
53795+
58c5fc13
MT
53796+ if (unlikely(!(gr_status & GR_READY)))
53797+ return 0;
53798+
53799+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53800+ gr_log_learn_id_change('u', real, effective, fs);
53801+
53802+ num = current->acl->user_trans_num;
53803+ uidlist = current->acl->user_transitions;
53804+
53805+ if (uidlist == NULL)
53806+ return 0;
53807+
53808+ if (real == -1)
53809+ realok = 1;
53810+ if (effective == -1)
53811+ effectiveok = 1;
53812+ if (fs == -1)
53813+ fsok = 1;
53814+
53815+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53816+ for (i = 0; i < num; i++) {
53817+ curuid = (int)uidlist[i];
53818+ if (real == curuid)
53819+ realok = 1;
53820+ if (effective == curuid)
53821+ effectiveok = 1;
53822+ if (fs == curuid)
53823+ fsok = 1;
53824+ }
53825+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53826+ for (i = 0; i < num; i++) {
53827+ curuid = (int)uidlist[i];
53828+ if (real == curuid)
53829+ break;
53830+ if (effective == curuid)
53831+ break;
53832+ if (fs == curuid)
53833+ break;
53834+ }
53835+ /* not in deny list */
53836+ if (i == num) {
53837+ realok = 1;
53838+ effectiveok = 1;
53839+ fsok = 1;
53840+ }
53841+ }
53842+
53843+ if (realok && effectiveok && fsok)
53844+ return 0;
53845+ else {
53846+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53847+ return 1;
53848+ }
53849+}
53850+
53851+int
53852+gr_check_group_change(int real, int effective, int fs)
53853+{
53854+ unsigned int i;
53855+ __u16 num;
53856+ gid_t *gidlist;
53857+ int curgid;
53858+ int realok = 0;
53859+ int effectiveok = 0;
53860+ int fsok = 0;
53861+
53862+ if (unlikely(!(gr_status & GR_READY)))
53863+ return 0;
53864+
53865+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53866+ gr_log_learn_id_change('g', real, effective, fs);
53867+
53868+ num = current->acl->group_trans_num;
53869+ gidlist = current->acl->group_transitions;
53870+
53871+ if (gidlist == NULL)
53872+ return 0;
53873+
53874+ if (real == -1)
53875+ realok = 1;
53876+ if (effective == -1)
53877+ effectiveok = 1;
53878+ if (fs == -1)
53879+ fsok = 1;
53880+
53881+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53882+ for (i = 0; i < num; i++) {
53883+ curgid = (int)gidlist[i];
53884+ if (real == curgid)
53885+ realok = 1;
53886+ if (effective == curgid)
53887+ effectiveok = 1;
53888+ if (fs == curgid)
53889+ fsok = 1;
53890+ }
53891+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53892+ for (i = 0; i < num; i++) {
53893+ curgid = (int)gidlist[i];
53894+ if (real == curgid)
53895+ break;
53896+ if (effective == curgid)
53897+ break;
53898+ if (fs == curgid)
53899+ break;
53900+ }
53901+ /* not in deny list */
53902+ if (i == num) {
53903+ realok = 1;
53904+ effectiveok = 1;
53905+ fsok = 1;
53906+ }
53907+ }
53908+
53909+ if (realok && effectiveok && fsok)
53910+ return 0;
53911+ else {
53912+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53913+ return 1;
53914+ }
53915+}
53916+
4c928ab7
MT
53917+extern int gr_acl_is_capable(const int cap);
53918+
58c5fc13
MT
53919+void
53920+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53921+{
53922+ struct acl_role_label *role = task->role;
53923+ struct acl_subject_label *subj = NULL;
53924+ struct acl_object_label *obj;
53925+ struct file *filp;
53926+
53927+ if (unlikely(!(gr_status & GR_READY)))
53928+ return;
53929+
53930+ filp = task->exec_file;
53931+
53932+ /* kernel process, we'll give them the kernel role */
53933+ if (unlikely(!filp)) {
53934+ task->role = kernel_role;
53935+ task->acl = kernel_role->root_label;
53936+ return;
53937+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53938+ role = lookup_acl_role_label(task, uid, gid);
53939+
4c928ab7
MT
53940+ /* don't change the role if we're not a privileged process */
53941+ if (role && task->role != role &&
53942+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53943+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53944+ return;
53945+
58c5fc13
MT
53946+ /* perform subject lookup in the possibly new role;
53947+ we can use this result below in the case where role == task->role
53948+ */
53949+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53950+
53951+ /* if we changed uid/gid but end up in the same role
53952+ and are using inheritance, don't lose the inherited subject:
53953+ if the current subject is other than what a normal lookup
53954+ would return, we arrived via inheritance, so don't
53955+ lose that subject
53956+ */
53957+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53958+ (subj == task->acl)))
53959+ task->acl = subj;
53960+
53961+ task->role = role;
53962+
53963+ task->is_writable = 0;
53964+
53965+ /* ignore additional mmap checks for processes that are writable
53966+ by the default ACL */
53967+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53968+ if (unlikely(obj->mode & GR_WRITE))
53969+ task->is_writable = 1;
53970+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53971+ if (unlikely(obj->mode & GR_WRITE))
53972+ task->is_writable = 1;
53973+
16454cff 53974+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58c5fc13
MT
53975+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53976+#endif
53977+
53978+ gr_set_proc_res(task);
53979+
53980+ return;
53981+}
53982+
53983+int
53984+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
4c928ab7 53985+ const int unsafe_flags)
58c5fc13
MT
53986+{
53987+ struct task_struct *task = current;
53988+ struct acl_subject_label *newacl;
53989+ struct acl_object_label *obj;
53990+ __u32 retmode;
53991+
53992+ if (unlikely(!(gr_status & GR_READY)))
53993+ return 0;
53994+
53995+ newacl = chk_subj_label(dentry, mnt, task->role);
53996+
5e856224
MT
53997+ /* special handling for the case where we did an strace -f -p <pid> from an admin role,
53998+ and pid then did an exec
53999+ */
54000+ rcu_read_lock();
54001+ read_lock(&tasklist_lock);
54002+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
54003+ (task->parent->acl->mode & GR_POVERRIDE))) {
54004+ read_unlock(&tasklist_lock);
54005+ rcu_read_unlock();
54006+ goto skip_check;
54007+ }
54008+ read_unlock(&tasklist_lock);
54009+ rcu_read_unlock();
54010+
4c928ab7 54011+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58c5fc13
MT
54012+ !(task->role->roletype & GR_ROLE_GOD) &&
54013+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
4c928ab7 54014+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
4c928ab7 54015+ if (unsafe_flags & LSM_UNSAFE_SHARE)
ae4e228f
MT
54016+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
54017+ else
54018+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58c5fc13
MT
54019+ return -EACCES;
54020+ }
5e856224
MT
54021+
54022+skip_check:
58c5fc13
MT
54023+
54024+ obj = chk_obj_label(dentry, mnt, task->acl);
54025+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
54026+
54027+ if (!(task->acl->mode & GR_INHERITLEARN) &&
54028+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
54029+ if (obj->nested)
54030+ task->acl = obj->nested;
54031+ else
54032+ task->acl = newacl;
54033+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
54034+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
54035+
54036+ task->is_writable = 0;
54037+
54038+ /* ignore additional mmap checks for processes that are writable
54039+ by the default ACL */
54040+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
54041+ if (unlikely(obj->mode & GR_WRITE))
54042+ task->is_writable = 1;
54043+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
54044+ if (unlikely(obj->mode & GR_WRITE))
54045+ task->is_writable = 1;
54046+
54047+ gr_set_proc_res(task);
54048+
16454cff 54049+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58c5fc13
MT
54050+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54051+#endif
54052+ return 0;
54053+}
54054+
54055+/* always called with valid inodev ptr */
54056+static void
54057+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
54058+{
54059+ struct acl_object_label *matchpo;
54060+ struct acl_subject_label *matchps;
54061+ struct acl_subject_label *subj;
54062+ struct acl_role_label *role;
ae4e228f 54063+ unsigned int x;
58c5fc13 54064+
ae4e228f 54065+ FOR_EACH_ROLE_START(role)
58c5fc13
MT
54066+ FOR_EACH_SUBJECT_START(role, subj, x)
54067+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
54068+ matchpo->mode |= GR_DELETED;
54069+ FOR_EACH_SUBJECT_END(subj,x)
54070+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54071+ if (subj->inode == ino && subj->device == dev)
54072+ subj->mode |= GR_DELETED;
54073+ FOR_EACH_NESTED_SUBJECT_END(subj)
54074+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
54075+ matchps->mode |= GR_DELETED;
ae4e228f 54076+ FOR_EACH_ROLE_END(role)
58c5fc13
MT
54077+
54078+ inodev->nentry->deleted = 1;
54079+
54080+ return;
54081+}
54082+
54083+void
54084+gr_handle_delete(const ino_t ino, const dev_t dev)
54085+{
54086+ struct inodev_entry *inodev;
54087+
54088+ if (unlikely(!(gr_status & GR_READY)))
54089+ return;
54090+
54091+ write_lock(&gr_inode_lock);
54092+ inodev = lookup_inodev_entry(ino, dev);
54093+ if (inodev != NULL)
54094+ do_handle_delete(inodev, ino, dev);
54095+ write_unlock(&gr_inode_lock);
54096+
54097+ return;
54098+}
54099+
54100+static void
54101+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54102+ const ino_t newinode, const dev_t newdevice,
54103+ struct acl_subject_label *subj)
54104+{
54105+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54106+ struct acl_object_label *match;
54107+
54108+ match = subj->obj_hash[index];
54109+
54110+ while (match && (match->inode != oldinode ||
54111+ match->device != olddevice ||
54112+ !(match->mode & GR_DELETED)))
54113+ match = match->next;
54114+
54115+ if (match && (match->inode == oldinode)
54116+ && (match->device == olddevice)
54117+ && (match->mode & GR_DELETED)) {
54118+ if (match->prev == NULL) {
54119+ subj->obj_hash[index] = match->next;
54120+ if (match->next != NULL)
54121+ match->next->prev = NULL;
54122+ } else {
54123+ match->prev->next = match->next;
54124+ if (match->next != NULL)
54125+ match->next->prev = match->prev;
54126+ }
54127+ match->prev = NULL;
54128+ match->next = NULL;
54129+ match->inode = newinode;
54130+ match->device = newdevice;
54131+ match->mode &= ~GR_DELETED;
54132+
54133+ insert_acl_obj_label(match, subj);
54134+ }
54135+
54136+ return;
54137+}
54138+
54139+static void
54140+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54141+ const ino_t newinode, const dev_t newdevice,
54142+ struct acl_role_label *role)
54143+{
54144+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54145+ struct acl_subject_label *match;
54146+
54147+ match = role->subj_hash[index];
54148+
54149+ while (match && (match->inode != oldinode ||
54150+ match->device != olddevice ||
54151+ !(match->mode & GR_DELETED)))
54152+ match = match->next;
54153+
54154+ if (match && (match->inode == oldinode)
54155+ && (match->device == olddevice)
54156+ && (match->mode & GR_DELETED)) {
54157+ if (match->prev == NULL) {
54158+ role->subj_hash[index] = match->next;
54159+ if (match->next != NULL)
54160+ match->next->prev = NULL;
54161+ } else {
54162+ match->prev->next = match->next;
54163+ if (match->next != NULL)
54164+ match->next->prev = match->prev;
54165+ }
54166+ match->prev = NULL;
54167+ match->next = NULL;
54168+ match->inode = newinode;
54169+ match->device = newdevice;
54170+ match->mode &= ~GR_DELETED;
54171+
54172+ insert_acl_subj_label(match, role);
54173+ }
54174+
54175+ return;
54176+}
54177+
54178+static void
54179+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54180+ const ino_t newinode, const dev_t newdevice)
54181+{
54182+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54183+ struct inodev_entry *match;
54184+
54185+ match = inodev_set.i_hash[index];
54186+
54187+ while (match && (match->nentry->inode != oldinode ||
54188+ match->nentry->device != olddevice || !match->nentry->deleted))
54189+ match = match->next;
54190+
54191+ if (match && (match->nentry->inode == oldinode)
54192+ && (match->nentry->device == olddevice) &&
54193+ match->nentry->deleted) {
54194+ if (match->prev == NULL) {
54195+ inodev_set.i_hash[index] = match->next;
54196+ if (match->next != NULL)
54197+ match->next->prev = NULL;
54198+ } else {
54199+ match->prev->next = match->next;
54200+ if (match->next != NULL)
54201+ match->next->prev = match->prev;
54202+ }
54203+ match->prev = NULL;
54204+ match->next = NULL;
54205+ match->nentry->inode = newinode;
54206+ match->nentry->device = newdevice;
54207+ match->nentry->deleted = 0;
54208+
54209+ insert_inodev_entry(match);
54210+ }
54211+
54212+ return;
54213+}
54214+
54215+static void
6e9df6a3 54216+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
58c5fc13
MT
54217+{
54218+ struct acl_subject_label *subj;
54219+ struct acl_role_label *role;
ae4e228f 54220+ unsigned int x;
6e9df6a3 54221+
ae4e228f 54222+ FOR_EACH_ROLE_START(role)
16454cff 54223+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
58c5fc13
MT
54224+
54225+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
16454cff
MT
54226+ if ((subj->inode == ino) && (subj->device == dev)) {
54227+ subj->inode = ino;
54228+ subj->device = dev;
58c5fc13
MT
54229+ }
54230+ FOR_EACH_NESTED_SUBJECT_END(subj)
54231+ FOR_EACH_SUBJECT_START(role, subj, x)
54232+ update_acl_obj_label(matchn->inode, matchn->device,
16454cff 54233+ ino, dev, subj);
58c5fc13 54234+ FOR_EACH_SUBJECT_END(subj,x)
ae4e228f 54235+ FOR_EACH_ROLE_END(role)
58c5fc13 54236+
16454cff 54237+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
58c5fc13
MT
54238+
54239+ return;
54240+}
54241+
6e9df6a3
MT
54242+static void
54243+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54244+ const struct vfsmount *mnt)
54245+{
54246+ ino_t ino = dentry->d_inode->i_ino;
54247+ dev_t dev = __get_dev(dentry);
54248+
54249+ __do_handle_create(matchn, ino, dev);
54250+
54251+ return;
54252+}
54253+
58c5fc13
MT
54254+void
54255+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54256+{
54257+ struct name_entry *matchn;
54258+
54259+ if (unlikely(!(gr_status & GR_READY)))
54260+ return;
54261+
54262+ preempt_disable();
54263+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54264+
54265+ if (unlikely((unsigned long)matchn)) {
54266+ write_lock(&gr_inode_lock);
54267+ do_handle_create(matchn, dentry, mnt);
54268+ write_unlock(&gr_inode_lock);
54269+ }
54270+ preempt_enable();
54271+
54272+ return;
54273+}
54274+
54275+void
6e9df6a3
MT
54276+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54277+{
54278+ struct name_entry *matchn;
54279+
54280+ if (unlikely(!(gr_status & GR_READY)))
54281+ return;
54282+
54283+ preempt_disable();
54284+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54285+
54286+ if (unlikely((unsigned long)matchn)) {
54287+ write_lock(&gr_inode_lock);
54288+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54289+ write_unlock(&gr_inode_lock);
54290+ }
54291+ preempt_enable();
54292+
54293+ return;
54294+}
54295+
54296+void
58c5fc13
MT
54297+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54298+ struct dentry *old_dentry,
54299+ struct dentry *new_dentry,
54300+ struct vfsmount *mnt, const __u8 replace)
54301+{
54302+ struct name_entry *matchn;
54303+ struct inodev_entry *inodev;
6e9df6a3 54304+ struct inode *inode = new_dentry->d_inode;
16454cff
MT
54305+ ino_t old_ino = old_dentry->d_inode->i_ino;
54306+ dev_t old_dev = __get_dev(old_dentry);
58c5fc13
MT
54307+
54308+ /* vfs_rename swaps the name and parent link for old_dentry and
54309+ new_dentry.
54310+ At this point, old_dentry has the new name, parent link, and inode
54311+ for the renamed file.
54312+ If a file is being replaced by a rename, new_dentry has the inode
54313+ and name for the replaced file.
54314+ */
54315+
54316+ if (unlikely(!(gr_status & GR_READY)))
54317+ return;
54318+
54319+ preempt_disable();
54320+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54321+
54322+ /* we wouldn't have to check d_inode if it weren't for
54323+ NFS silly-renaming
54324+ */
54325+
54326+ write_lock(&gr_inode_lock);
6e9df6a3
MT
54327+ if (unlikely(replace && inode)) {
54328+ ino_t new_ino = inode->i_ino;
16454cff
MT
54329+ dev_t new_dev = __get_dev(new_dentry);
54330+
54331+ inodev = lookup_inodev_entry(new_ino, new_dev);
6e9df6a3 54332+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
16454cff 54333+ do_handle_delete(inodev, new_ino, new_dev);
58c5fc13
MT
54334+ }
54335+
16454cff 54336+ inodev = lookup_inodev_entry(old_ino, old_dev);
6e9df6a3 54337+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
16454cff 54338+ do_handle_delete(inodev, old_ino, old_dev);
58c5fc13
MT
54339+
54340+ if (unlikely((unsigned long)matchn))
54341+ do_handle_create(matchn, old_dentry, mnt);
54342+
54343+ write_unlock(&gr_inode_lock);
54344+ preempt_enable();
54345+
54346+ return;
54347+}
54348+
54349+static int
54350+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54351+ unsigned char **sum)
54352+{
54353+ struct acl_role_label *r;
54354+ struct role_allowed_ip *ipp;
54355+ struct role_transition *trans;
54356+ unsigned int i;
54357+ int found = 0;
bc901d79
MT
54358+ u32 curr_ip = current->signal->curr_ip;
54359+
54360+ current->signal->saved_ip = curr_ip;
58c5fc13
MT
54361+
54362+ /* check transition table */
54363+
54364+ for (trans = current->role->transitions; trans; trans = trans->next) {
54365+ if (!strcmp(rolename, trans->rolename)) {
54366+ found = 1;
54367+ break;
54368+ }
54369+ }
54370+
54371+ if (!found)
54372+ return 0;
54373+
54374+ /* handle special roles that do not require authentication
54375+ and check ip */
54376+
ae4e228f 54377+ FOR_EACH_ROLE_START(r)
58c5fc13
MT
54378+ if (!strcmp(rolename, r->rolename) &&
54379+ (r->roletype & GR_ROLE_SPECIAL)) {
54380+ found = 0;
54381+ if (r->allowed_ips != NULL) {
54382+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
bc901d79 54383+ if ((ntohl(curr_ip) & ipp->netmask) ==
58c5fc13
MT
54384+ (ntohl(ipp->addr) & ipp->netmask))
54385+ found = 1;
54386+ }
54387+ } else
54388+ found = 2;
54389+ if (!found)
54390+ return 0;
54391+
54392+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54393+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54394+ *salt = NULL;
54395+ *sum = NULL;
54396+ return 1;
54397+ }
54398+ }
ae4e228f 54399+ FOR_EACH_ROLE_END(r)
58c5fc13
MT
54400+
54401+ for (i = 0; i < num_sprole_pws; i++) {
54402+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54403+ *salt = acl_special_roles[i]->salt;
54404+ *sum = acl_special_roles[i]->sum;
54405+ return 1;
54406+ }
54407+ }
54408+
54409+ return 0;
54410+}
54411+
54412+static void
54413+assign_special_role(char *rolename)
54414+{
54415+ struct acl_object_label *obj;
54416+ struct acl_role_label *r;
54417+ struct acl_role_label *assigned = NULL;
54418+ struct task_struct *tsk;
54419+ struct file *filp;
58c5fc13 54420+
ae4e228f 54421+ FOR_EACH_ROLE_START(r)
58c5fc13 54422+ if (!strcmp(rolename, r->rolename) &&
ae4e228f 54423+ (r->roletype & GR_ROLE_SPECIAL)) {
58c5fc13 54424+ assigned = r;
ae4e228f
MT
54425+ break;
54426+ }
54427+ FOR_EACH_ROLE_END(r)
58c5fc13
MT
54428+
54429+ if (!assigned)
54430+ return;
54431+
54432+ read_lock(&tasklist_lock);
54433+ read_lock(&grsec_exec_file_lock);
54434+
6892158b 54435+ tsk = current->real_parent;
58c5fc13
MT
54436+ if (tsk == NULL)
54437+ goto out_unlock;
54438+
54439+ filp = tsk->exec_file;
54440+ if (filp == NULL)
54441+ goto out_unlock;
54442+
54443+ tsk->is_writable = 0;
54444+
54445+ tsk->acl_sp_role = 1;
54446+ tsk->acl_role_id = ++acl_sp_role_value;
54447+ tsk->role = assigned;
54448+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54449+
54450+ /* ignore additional mmap checks for processes that are writable
54451+ by the default ACL */
54452+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54453+ if (unlikely(obj->mode & GR_WRITE))
54454+ tsk->is_writable = 1;
54455+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54456+ if (unlikely(obj->mode & GR_WRITE))
54457+ tsk->is_writable = 1;
54458+
16454cff 54459+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58c5fc13
MT
54460+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54461+#endif
54462+
54463+out_unlock:
54464+ read_unlock(&grsec_exec_file_lock);
54465+ read_unlock(&tasklist_lock);
54466+ return;
54467+}
54468+
54469+int gr_check_secure_terminal(struct task_struct *task)
54470+{
54471+ struct task_struct *p, *p2, *p3;
54472+ struct files_struct *files;
54473+ struct fdtable *fdt;
54474+ struct file *our_file = NULL, *file;
54475+ int i;
54476+
54477+ if (task->signal->tty == NULL)
54478+ return 1;
54479+
54480+ files = get_files_struct(task);
54481+ if (files != NULL) {
54482+ rcu_read_lock();
54483+ fdt = files_fdtable(files);
54484+ for (i=0; i < fdt->max_fds; i++) {
54485+ file = fcheck_files(files, i);
54486+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54487+ get_file(file);
54488+ our_file = file;
54489+ }
54490+ }
54491+ rcu_read_unlock();
54492+ put_files_struct(files);
54493+ }
54494+
54495+ if (our_file == NULL)
54496+ return 1;
54497+
54498+ read_lock(&tasklist_lock);
54499+ do_each_thread(p2, p) {
54500+ files = get_files_struct(p);
54501+ if (files == NULL ||
54502+ (p->signal && p->signal->tty == task->signal->tty)) {
54503+ if (files != NULL)
54504+ put_files_struct(files);
54505+ continue;
54506+ }
54507+ rcu_read_lock();
54508+ fdt = files_fdtable(files);
54509+ for (i=0; i < fdt->max_fds; i++) {
54510+ file = fcheck_files(files, i);
54511+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54512+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54513+ p3 = task;
54514+ while (p3->pid > 0) {
54515+ if (p3 == p)
54516+ break;
6892158b 54517+ p3 = p3->real_parent;
58c5fc13
MT
54518+ }
54519+ if (p3 == p)
54520+ break;
54521+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54522+ gr_handle_alertkill(p);
54523+ rcu_read_unlock();
54524+ put_files_struct(files);
54525+ read_unlock(&tasklist_lock);
54526+ fput(our_file);
54527+ return 0;
54528+ }
54529+ }
54530+ rcu_read_unlock();
54531+ put_files_struct(files);
54532+ } while_each_thread(p2, p);
54533+ read_unlock(&tasklist_lock);
54534+
54535+ fput(our_file);
54536+ return 1;
54537+}
54538+
54539+ssize_t
54540+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54541+{
54542+ struct gr_arg_wrapper uwrap;
ae4e228f
MT
54543+ unsigned char *sprole_salt = NULL;
54544+ unsigned char *sprole_sum = NULL;
58c5fc13
MT
54545+ int error = sizeof (struct gr_arg_wrapper);
54546+ int error2 = 0;
54547+
bc901d79 54548+ mutex_lock(&gr_dev_mutex);
58c5fc13
MT
54549+
54550+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54551+ error = -EPERM;
54552+ goto out;
54553+ }
54554+
54555+ if (count != sizeof (struct gr_arg_wrapper)) {
54556+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54557+ error = -EINVAL;
54558+ goto out;
54559+ }
54560+
54561+
54562+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54563+ gr_auth_expires = 0;
54564+ gr_auth_attempts = 0;
54565+ }
54566+
54567+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54568+ error = -EFAULT;
54569+ goto out;
54570+ }
54571+
54572+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54573+ error = -EINVAL;
54574+ goto out;
54575+ }
54576+
54577+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54578+ error = -EFAULT;
54579+ goto out;
54580+ }
54581+
54582+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54583+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54584+ time_after(gr_auth_expires, get_seconds())) {
54585+ error = -EBUSY;
54586+ goto out;
54587+ }
54588+
54589+ /* if a non-root user is trying to do anything other than use a special role,
54590+ do not attempt authentication and do not count towards authentication
54591+ locking
54592+ */
54593+
54594+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54595+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54596+ current_uid()) {
54597+ error = -EPERM;
54598+ goto out;
54599+ }
54600+
54601+ /* ensure pw and special role name are null terminated */
54602+
54603+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54604+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54605+
54606+ /* Okay.
54607+ * We now have enough of the argument structure (we have yet
54608+ * to copy_from_user the tables themselves). Copy the tables
54609+ * only if we need them, i.e. for loading operations. */
54610+
54611+ switch (gr_usermode->mode) {
54612+ case GR_STATUS:
54613+ if (gr_status & GR_READY) {
54614+ error = 1;
54615+ if (!gr_check_secure_terminal(current))
54616+ error = 3;
54617+ } else
54618+ error = 2;
54619+ goto out;
54620+ case GR_SHUTDOWN:
54621+ if ((gr_status & GR_READY)
54622+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
ae4e228f 54623+ pax_open_kernel();
58c5fc13 54624+ gr_status &= ~GR_READY;
ae4e228f
MT
54625+ pax_close_kernel();
54626+
58c5fc13
MT
54627+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54628+ free_variables();
54629+ memset(gr_usermode, 0, sizeof (struct gr_arg));
54630+ memset(gr_system_salt, 0, GR_SALT_LEN);
54631+ memset(gr_system_sum, 0, GR_SHA_LEN);
54632+ } else if (gr_status & GR_READY) {
54633+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54634+ error = -EPERM;
54635+ } else {
54636+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54637+ error = -EAGAIN;
54638+ }
54639+ break;
54640+ case GR_ENABLE:
54641+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54642+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54643+ else {
54644+ if (gr_status & GR_READY)
54645+ error = -EAGAIN;
54646+ else
54647+ error = error2;
54648+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54649+ }
54650+ break;
54651+ case GR_RELOAD:
54652+ if (!(gr_status & GR_READY)) {
54653+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54654+ error = -EAGAIN;
54655+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
c52201e0 54656+ preempt_disable();
58c5fc13 54657+
ae4e228f 54658+ pax_open_kernel();
58c5fc13 54659+ gr_status &= ~GR_READY;
ae4e228f
MT
54660+ pax_close_kernel();
54661+
58c5fc13
MT
54662+ free_variables();
54663+ if (!(error2 = gracl_init(gr_usermode))) {
c52201e0 54664+ preempt_enable();
58c5fc13
MT
54665+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54666+ } else {
c52201e0 54667+ preempt_enable();
58c5fc13
MT
54668+ error = error2;
54669+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54670+ }
54671+ } else {
54672+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54673+ error = -EPERM;
54674+ }
54675+ break;
54676+ case GR_SEGVMOD:
54677+ if (unlikely(!(gr_status & GR_READY))) {
54678+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54679+ error = -EAGAIN;
54680+ break;
54681+ }
54682+
54683+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54684+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54685+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54686+ struct acl_subject_label *segvacl;
54687+ segvacl =
54688+ lookup_acl_subj_label(gr_usermode->segv_inode,
54689+ gr_usermode->segv_device,
54690+ current->role);
54691+ if (segvacl) {
54692+ segvacl->crashes = 0;
54693+ segvacl->expires = 0;
54694+ }
54695+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54696+ gr_remove_uid(gr_usermode->segv_uid);
54697+ }
54698+ } else {
54699+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54700+ error = -EPERM;
54701+ }
54702+ break;
54703+ case GR_SPROLE:
54704+ case GR_SPROLEPAM:
54705+ if (unlikely(!(gr_status & GR_READY))) {
54706+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54707+ error = -EAGAIN;
54708+ break;
54709+ }
54710+
54711+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54712+ current->role->expires = 0;
54713+ current->role->auth_attempts = 0;
54714+ }
54715+
54716+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54717+ time_after(current->role->expires, get_seconds())) {
54718+ error = -EBUSY;
54719+ goto out;
54720+ }
54721+
54722+ if (lookup_special_role_auth
54723+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54724+ && ((!sprole_salt && !sprole_sum)
54725+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54726+ char *p = "";
54727+ assign_special_role(gr_usermode->sp_role);
54728+ read_lock(&tasklist_lock);
6892158b
MT
54729+ if (current->real_parent)
54730+ p = current->real_parent->role->rolename;
58c5fc13
MT
54731+ read_unlock(&tasklist_lock);
54732+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54733+ p, acl_sp_role_value);
54734+ } else {
54735+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54736+ error = -EPERM;
54737+ if(!(current->role->auth_attempts++))
54738+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54739+
54740+ goto out;
54741+ }
54742+ break;
54743+ case GR_UNSPROLE:
54744+ if (unlikely(!(gr_status & GR_READY))) {
54745+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54746+ error = -EAGAIN;
54747+ break;
54748+ }
54749+
54750+ if (current->role->roletype & GR_ROLE_SPECIAL) {
54751+ char *p = "";
54752+ int i = 0;
54753+
54754+ read_lock(&tasklist_lock);
6892158b
MT
54755+ if (current->real_parent) {
54756+ p = current->real_parent->role->rolename;
54757+ i = current->real_parent->acl_role_id;
58c5fc13
MT
54758+ }
54759+ read_unlock(&tasklist_lock);
54760+
54761+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54762+ gr_set_acls(1);
54763+ } else {
58c5fc13
MT
54764+ error = -EPERM;
54765+ goto out;
54766+ }
54767+ break;
54768+ default:
54769+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54770+ error = -EINVAL;
54771+ break;
54772+ }
54773+
54774+ if (error != -EPERM)
54775+ goto out;
54776+
54777+ if(!(gr_auth_attempts++))
54778+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54779+
54780+ out:
bc901d79 54781+ mutex_unlock(&gr_dev_mutex);
58c5fc13
MT
54782+ return error;
54783+}
54784+
16454cff
MT
54785+/* must be called with
54786+ rcu_read_lock();
54787+ read_lock(&tasklist_lock);
54788+ read_lock(&grsec_exec_file_lock);
54789+*/
54790+int gr_apply_subject_to_task(struct task_struct *task)
54791+{
54792+ struct acl_object_label *obj;
54793+ char *tmpname;
54794+ struct acl_subject_label *tmpsubj;
54795+ struct file *filp;
54796+ struct name_entry *nmatch;
54797+
54798+ filp = task->exec_file;
54799+ if (filp == NULL)
54800+ return 0;
54801+
54802+ /* the following is to apply the correct subject
54803+ on binaries running when the RBAC system
54804+ is enabled, when the binaries have been
54805+ replaced or deleted since their execution
54806+ -----
54807+ when the RBAC system starts, the inode/dev
54808+ from exec_file will be one the RBAC system
54809+ is unaware of. It only knows the inode/dev
54810+ of the present file on disk, or the absence
54811+ of it.
54812+ */
54813+ preempt_disable();
54814+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54815+
54816+ nmatch = lookup_name_entry(tmpname);
54817+ preempt_enable();
54818+ tmpsubj = NULL;
54819+ if (nmatch) {
54820+ if (nmatch->deleted)
54821+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54822+ else
54823+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54824+ if (tmpsubj != NULL)
54825+ task->acl = tmpsubj;
54826+ }
54827+ if (tmpsubj == NULL)
54828+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54829+ task->role);
54830+ if (task->acl) {
16454cff
MT
54831+ task->is_writable = 0;
54832+ /* ignore additional mmap checks for processes that are writable
54833+ by the default ACL */
54834+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54835+ if (unlikely(obj->mode & GR_WRITE))
54836+ task->is_writable = 1;
54837+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54838+ if (unlikely(obj->mode & GR_WRITE))
54839+ task->is_writable = 1;
54840+
54841+ gr_set_proc_res(task);
54842+
54843+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54844+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54845+#endif
54846+ } else {
54847+ return 1;
54848+ }
54849+
54850+ return 0;
54851+}
54852+
58c5fc13
MT
54853+int
54854+gr_set_acls(const int type)
54855+{
58c5fc13 54856+ struct task_struct *task, *task2;
58c5fc13
MT
54857+ struct acl_role_label *role = current->role;
54858+ __u16 acl_role_id = current->acl_role_id;
54859+ const struct cred *cred;
16454cff 54860+ int ret;
58c5fc13 54861+
ae4e228f 54862+ rcu_read_lock();
58c5fc13
MT
54863+ read_lock(&tasklist_lock);
54864+ read_lock(&grsec_exec_file_lock);
54865+ do_each_thread(task2, task) {
54866+ /* check to see if we're called from the exit handler;
54867+ if so, only replace ACLs that have inherited the admin
54868+ ACL */
54869+
54870+ if (type && (task->role != role ||
54871+ task->acl_role_id != acl_role_id))
54872+ continue;
54873+
54874+ task->acl_role_id = 0;
54875+ task->acl_sp_role = 0;
54876+
16454cff 54877+ if (task->exec_file) {
58c5fc13
MT
54878+ cred = __task_cred(task);
54879+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
16454cff
MT
54880+ ret = gr_apply_subject_to_task(task);
54881+ if (ret) {
58c5fc13
MT
54882+ read_unlock(&grsec_exec_file_lock);
54883+ read_unlock(&tasklist_lock);
ae4e228f 54884+ rcu_read_unlock();
58c5fc13 54885+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
16454cff 54886+ return ret;
58c5fc13
MT
54887+ }
54888+ } else {
54889+ // it's a kernel process
54890+ task->role = kernel_role;
54891+ task->acl = kernel_role->root_label;
54892+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54893+ task->acl->mode &= ~GR_PROCFIND;
54894+#endif
54895+ }
54896+ } while_each_thread(task2, task);
54897+ read_unlock(&grsec_exec_file_lock);
54898+ read_unlock(&tasklist_lock);
ae4e228f
MT
54899+ rcu_read_unlock();
54900+
58c5fc13
MT
54901+ return 0;
54902+}
54903+
54904+void
54905+gr_learn_resource(const struct task_struct *task,
54906+ const int res, const unsigned long wanted, const int gt)
54907+{
54908+ struct acl_subject_label *acl;
54909+ const struct cred *cred;
54910+
54911+ if (unlikely((gr_status & GR_READY) &&
54912+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54913+ goto skip_reslog;
54914+
54915+#ifdef CONFIG_GRKERNSEC_RESLOG
54916+ gr_log_resource(task, res, wanted, gt);
54917+#endif
54918+ skip_reslog:
54919+
54920+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54921+ return;
54922+
54923+ acl = task->acl;
54924+
54925+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54926+ !(acl->resmask & (1 << (unsigned short) res))))
54927+ return;
54928+
54929+ if (wanted >= acl->res[res].rlim_cur) {
54930+ unsigned long res_add;
54931+
54932+ res_add = wanted;
54933+ switch (res) {
54934+ case RLIMIT_CPU:
54935+ res_add += GR_RLIM_CPU_BUMP;
54936+ break;
54937+ case RLIMIT_FSIZE:
54938+ res_add += GR_RLIM_FSIZE_BUMP;
54939+ break;
54940+ case RLIMIT_DATA:
54941+ res_add += GR_RLIM_DATA_BUMP;
54942+ break;
54943+ case RLIMIT_STACK:
54944+ res_add += GR_RLIM_STACK_BUMP;
54945+ break;
54946+ case RLIMIT_CORE:
54947+ res_add += GR_RLIM_CORE_BUMP;
54948+ break;
54949+ case RLIMIT_RSS:
54950+ res_add += GR_RLIM_RSS_BUMP;
54951+ break;
54952+ case RLIMIT_NPROC:
54953+ res_add += GR_RLIM_NPROC_BUMP;
54954+ break;
54955+ case RLIMIT_NOFILE:
54956+ res_add += GR_RLIM_NOFILE_BUMP;
54957+ break;
54958+ case RLIMIT_MEMLOCK:
54959+ res_add += GR_RLIM_MEMLOCK_BUMP;
54960+ break;
54961+ case RLIMIT_AS:
54962+ res_add += GR_RLIM_AS_BUMP;
54963+ break;
54964+ case RLIMIT_LOCKS:
54965+ res_add += GR_RLIM_LOCKS_BUMP;
54966+ break;
54967+ case RLIMIT_SIGPENDING:
54968+ res_add += GR_RLIM_SIGPENDING_BUMP;
54969+ break;
54970+ case RLIMIT_MSGQUEUE:
54971+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54972+ break;
54973+ case RLIMIT_NICE:
54974+ res_add += GR_RLIM_NICE_BUMP;
54975+ break;
54976+ case RLIMIT_RTPRIO:
54977+ res_add += GR_RLIM_RTPRIO_BUMP;
54978+ break;
54979+ case RLIMIT_RTTIME:
54980+ res_add += GR_RLIM_RTTIME_BUMP;
54981+ break;
54982+ }
54983+
54984+ acl->res[res].rlim_cur = res_add;
54985+
54986+ if (wanted > acl->res[res].rlim_max)
54987+ acl->res[res].rlim_max = res_add;
54988+
54989+ /* only log the subject filename, since resource logging is supported for
54990+ single-subject learning only */
ae4e228f 54991+ rcu_read_lock();
58c5fc13
MT
54992+ cred = __task_cred(task);
54993+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54994+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54995+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
bc901d79 54996+ "", (unsigned long) res, &task->signal->saved_ip);
ae4e228f 54997+ rcu_read_unlock();
58c5fc13
MT
54998+ }
54999+
55000+ return;
55001+}
55002+
55003+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
55004+void
55005+pax_set_initial_flags(struct linux_binprm *bprm)
55006+{
55007+ struct task_struct *task = current;
55008+ struct acl_subject_label *proc;
55009+ unsigned long flags;
55010+
55011+ if (unlikely(!(gr_status & GR_READY)))
55012+ return;
55013+
55014+ flags = pax_get_flags(task);
55015+
55016+ proc = task->acl;
55017+
55018+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
55019+ flags &= ~MF_PAX_PAGEEXEC;
55020+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
55021+ flags &= ~MF_PAX_SEGMEXEC;
55022+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
55023+ flags &= ~MF_PAX_RANDMMAP;
55024+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
55025+ flags &= ~MF_PAX_EMUTRAMP;
55026+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
55027+ flags &= ~MF_PAX_MPROTECT;
55028+
55029+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
55030+ flags |= MF_PAX_PAGEEXEC;
55031+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
55032+ flags |= MF_PAX_SEGMEXEC;
55033+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
55034+ flags |= MF_PAX_RANDMMAP;
55035+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
55036+ flags |= MF_PAX_EMUTRAMP;
55037+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
55038+ flags |= MF_PAX_MPROTECT;
55039+
55040+ pax_set_flags(task, flags);
55041+
55042+ return;
55043+}
55044+#endif
55045+
55046+#ifdef CONFIG_SYSCTL
55047+/* Eric Biederman likes breaking userland ABI and every inode-based security
55048+ system to save 35kb of memory */
55049+
55050+/* we modify the passed in filename, but adjust it back before returning */
55051+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
55052+{
55053+ struct name_entry *nmatch;
55054+ char *p, *lastp = NULL;
55055+ struct acl_object_label *obj = NULL, *tmp;
55056+ struct acl_subject_label *tmpsubj;
55057+ char c = '\0';
55058+
55059+ read_lock(&gr_inode_lock);
55060+
55061+ p = name + len - 1;
55062+ do {
55063+ nmatch = lookup_name_entry(name);
55064+ if (lastp != NULL)
55065+ *lastp = c;
55066+
55067+ if (nmatch == NULL)
55068+ goto next_component;
55069+ tmpsubj = current->acl;
55070+ do {
55071+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
55072+ if (obj != NULL) {
55073+ tmp = obj->globbed;
55074+ while (tmp) {
55075+ if (!glob_match(tmp->filename, name)) {
55076+ obj = tmp;
55077+ goto found_obj;
55078+ }
55079+ tmp = tmp->next;
55080+ }
55081+ goto found_obj;
55082+ }
55083+ } while ((tmpsubj = tmpsubj->parent_subject));
55084+next_component:
55085+ /* end case */
55086+ if (p == name)
55087+ break;
55088+
55089+ while (*p != '/')
55090+ p--;
55091+ if (p == name)
55092+ lastp = p + 1;
55093+ else {
55094+ lastp = p;
55095+ p--;
55096+ }
55097+ c = *lastp;
55098+ *lastp = '\0';
55099+ } while (1);
55100+found_obj:
55101+ read_unlock(&gr_inode_lock);
55102+ /* obj returned will always be non-null */
55103+ return obj;
55104+}
55105+
55106+/* returns 0 when allowing, non-zero on error.
55107+ An op of 0 is used for readdir, so we don't log the names of hidden files.
55108+*/
55109+__u32
55110+gr_handle_sysctl(const struct ctl_table *table, const int op)
55111+{
57199397 55112+ struct ctl_table *tmp;
58c5fc13
MT
55113+ const char *proc_sys = "/proc/sys";
55114+ char *path;
55115+ struct acl_object_label *obj;
55116+ unsigned short len = 0, pos = 0, depth = 0, i;
55117+ __u32 err = 0;
55118+ __u32 mode = 0;
55119+
55120+ if (unlikely(!(gr_status & GR_READY)))
55121+ return 0;
55122+
55123+ /* for now, ignore operations on non-sysctl entries if it's not a
55124+ readdir*/
55125+ if (table->child != NULL && op != 0)
55126+ return 0;
55127+
55128+ mode |= GR_FIND;
55129+ /* it's only a read if it's an entry; a read on dirs is for readdir */
55130+ if (op & MAY_READ)
55131+ mode |= GR_READ;
55132+ if (op & MAY_WRITE)
55133+ mode |= GR_WRITE;
55134+
55135+ preempt_disable();
55136+
55137+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55138+
55139+ /* it's only a read/write if it's an actual entry, not a dir
55140+ (dirs are opened for readdir)
55141+ */
55142+
55143+ /* convert the requested sysctl entry into a pathname */
55144+
57199397 55145+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58c5fc13
MT
55146+ len += strlen(tmp->procname);
55147+ len++;
55148+ depth++;
55149+ }
55150+
55151+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
55152+ /* deny */
55153+ goto out;
55154+ }
55155+
55156+ memset(path, 0, PAGE_SIZE);
55157+
55158+ memcpy(path, proc_sys, strlen(proc_sys));
55159+
55160+ pos += strlen(proc_sys);
55161+
55162+ for (; depth > 0; depth--) {
55163+ path[pos] = '/';
55164+ pos++;
57199397 55165+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58c5fc13
MT
55166+ if (depth == i) {
55167+ memcpy(path + pos, tmp->procname,
55168+ strlen(tmp->procname));
55169+ pos += strlen(tmp->procname);
55170+ }
55171+ i++;
55172+ }
55173+ }
55174+
55175+ obj = gr_lookup_by_name(path, pos);
55176+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
55177+
55178+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
55179+ ((err & mode) != mode))) {
55180+ __u32 new_mode = mode;
55181+
55182+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55183+
55184+ err = 0;
55185+ gr_log_learn_sysctl(path, new_mode);
55186+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
55187+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
55188+ err = -ENOENT;
55189+ } else if (!(err & GR_FIND)) {
55190+ err = -ENOENT;
55191+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
55192+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
55193+ path, (mode & GR_READ) ? " reading" : "",
55194+ (mode & GR_WRITE) ? " writing" : "");
55195+ err = -EACCES;
55196+ } else if ((err & mode) != mode) {
55197+ err = -EACCES;
55198+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
55199+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
55200+ path, (mode & GR_READ) ? " reading" : "",
55201+ (mode & GR_WRITE) ? " writing" : "");
55202+ err = 0;
55203+ } else
55204+ err = 0;
55205+
55206+ out:
55207+ preempt_enable();
55208+
55209+ return err;
55210+}
55211+#endif
55212+
55213+int
55214+gr_handle_proc_ptrace(struct task_struct *task)
55215+{
55216+ struct file *filp;
55217+ struct task_struct *tmp = task;
55218+ struct task_struct *curtemp = current;
55219+ __u32 retmode;
55220+
55221+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55222+ if (unlikely(!(gr_status & GR_READY)))
55223+ return 0;
55224+#endif
55225+
55226+ read_lock(&tasklist_lock);
55227+ read_lock(&grsec_exec_file_lock);
55228+ filp = task->exec_file;
55229+
55230+ while (tmp->pid > 0) {
55231+ if (tmp == curtemp)
55232+ break;
6892158b 55233+ tmp = tmp->real_parent;
58c5fc13
MT
55234+ }
55235+
55236+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55237+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
55238+ read_unlock(&grsec_exec_file_lock);
55239+ read_unlock(&tasklist_lock);
55240+ return 1;
55241+ }
55242+
55243+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55244+ if (!(gr_status & GR_READY)) {
55245+ read_unlock(&grsec_exec_file_lock);
55246+ read_unlock(&tasklist_lock);
55247+ return 0;
55248+ }
55249+#endif
55250+
55251+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
55252+ read_unlock(&grsec_exec_file_lock);
55253+ read_unlock(&tasklist_lock);
55254+
55255+ if (retmode & GR_NOPTRACE)
55256+ return 1;
55257+
55258+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
55259+ && (current->acl != task->acl || (current->acl != current->role->root_label
55260+ && current->pid != task->pid)))
55261+ return 1;
55262+
55263+ return 0;
55264+}
55265+
6892158b
MT
55266+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
55267+{
55268+ if (unlikely(!(gr_status & GR_READY)))
55269+ return;
55270+
55271+ if (!(current->role->roletype & GR_ROLE_GOD))
55272+ return;
55273+
55274+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55275+ p->role->rolename, gr_task_roletype_to_char(p),
55276+ p->acl->filename);
55277+}
55278+
58c5fc13
MT
55279+int
55280+gr_handle_ptrace(struct task_struct *task, const long request)
55281+{
55282+ struct task_struct *tmp = task;
55283+ struct task_struct *curtemp = current;
55284+ __u32 retmode;
55285+
55286+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55287+ if (unlikely(!(gr_status & GR_READY)))
55288+ return 0;
55289+#endif
5e856224
MT
55290+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
55291+ read_lock(&tasklist_lock);
55292+ while (tmp->pid > 0) {
55293+ if (tmp == curtemp)
55294+ break;
55295+ tmp = tmp->real_parent;
55296+ }
58c5fc13 55297+
5e856224
MT
55298+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55299+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55300+ read_unlock(&tasklist_lock);
55301+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55302+ return 1;
55303+ }
58c5fc13 55304+ read_unlock(&tasklist_lock);
58c5fc13 55305+ }
58c5fc13
MT
55306+
55307+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55308+ if (!(gr_status & GR_READY))
55309+ return 0;
55310+#endif
55311+
55312+ read_lock(&grsec_exec_file_lock);
55313+ if (unlikely(!task->exec_file)) {
55314+ read_unlock(&grsec_exec_file_lock);
55315+ return 0;
55316+ }
55317+
55318+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55319+ read_unlock(&grsec_exec_file_lock);
55320+
55321+ if (retmode & GR_NOPTRACE) {
55322+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55323+ return 1;
55324+ }
55325+
55326+ if (retmode & GR_PTRACERD) {
55327+ switch (request) {
6e9df6a3 55328+ case PTRACE_SEIZE:
58c5fc13
MT
55329+ case PTRACE_POKETEXT:
55330+ case PTRACE_POKEDATA:
55331+ case PTRACE_POKEUSR:
55332+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55333+ case PTRACE_SETREGS:
55334+ case PTRACE_SETFPREGS:
55335+#endif
55336+#ifdef CONFIG_X86
55337+ case PTRACE_SETFPXREGS:
55338+#endif
55339+#ifdef CONFIG_ALTIVEC
55340+ case PTRACE_SETVRREGS:
55341+#endif
55342+ return 1;
55343+ default:
55344+ return 0;
55345+ }
55346+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
55347+ !(current->role->roletype & GR_ROLE_GOD) &&
55348+ (current->acl != task->acl)) {
55349+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55350+ return 1;
55351+ }
55352+
55353+ return 0;
55354+}
55355+
55356+static int is_writable_mmap(const struct file *filp)
55357+{
55358+ struct task_struct *task = current;
55359+ struct acl_object_label *obj, *obj2;
55360+
55361+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71d190be 55362+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
58c5fc13
MT
55363+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55364+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55365+ task->role->root_label);
55366+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55367+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55368+ return 1;
55369+ }
55370+ }
55371+ return 0;
55372+}
55373+
55374+int
55375+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55376+{
55377+ __u32 mode;
55378+
55379+ if (unlikely(!file || !(prot & PROT_EXEC)))
55380+ return 1;
55381+
55382+ if (is_writable_mmap(file))
55383+ return 0;
55384+
55385+ mode =
55386+ gr_search_file(file->f_path.dentry,
55387+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55388+ file->f_path.mnt);
55389+
55390+ if (!gr_tpe_allow(file))
55391+ return 0;
55392+
55393+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55394+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55395+ return 0;
55396+ } else if (unlikely(!(mode & GR_EXEC))) {
55397+ return 0;
55398+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55399+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55400+ return 1;
55401+ }
55402+
55403+ return 1;
55404+}
55405+
55406+int
55407+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55408+{
55409+ __u32 mode;
55410+
55411+ if (unlikely(!file || !(prot & PROT_EXEC)))
55412+ return 1;
55413+
55414+ if (is_writable_mmap(file))
55415+ return 0;
55416+
55417+ mode =
55418+ gr_search_file(file->f_path.dentry,
55419+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55420+ file->f_path.mnt);
55421+
55422+ if (!gr_tpe_allow(file))
55423+ return 0;
55424+
55425+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55426+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55427+ return 0;
55428+ } else if (unlikely(!(mode & GR_EXEC))) {
55429+ return 0;
55430+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55431+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55432+ return 1;
55433+ }
55434+
55435+ return 1;
55436+}
55437+
55438+void
55439+gr_acl_handle_psacct(struct task_struct *task, const long code)
55440+{
55441+ unsigned long runtime;
55442+ unsigned long cputime;
55443+ unsigned int wday, cday;
55444+ __u8 whr, chr;
55445+ __u8 wmin, cmin;
55446+ __u8 wsec, csec;
55447+ struct timespec timeval;
55448+
55449+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55450+ !(task->acl->mode & GR_PROCACCT)))
55451+ return;
55452+
55453+ do_posix_clock_monotonic_gettime(&timeval);
55454+ runtime = timeval.tv_sec - task->start_time.tv_sec;
55455+ wday = runtime / (3600 * 24);
55456+ runtime -= wday * (3600 * 24);
55457+ whr = runtime / 3600;
55458+ runtime -= whr * 3600;
55459+ wmin = runtime / 60;
55460+ runtime -= wmin * 60;
55461+ wsec = runtime;
55462+
55463+ cputime = (task->utime + task->stime) / HZ;
55464+ cday = cputime / (3600 * 24);
55465+ cputime -= cday * (3600 * 24);
55466+ chr = cputime / 3600;
55467+ cputime -= chr * 3600;
55468+ cmin = cputime / 60;
55469+ cputime -= cmin * 60;
55470+ csec = cputime;
55471+
55472+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55473+
55474+ return;
55475+}
55476+
55477+void gr_set_kernel_label(struct task_struct *task)
55478+{
55479+ if (gr_status & GR_READY) {
55480+ task->role = kernel_role;
55481+ task->acl = kernel_role->root_label;
55482+ }
55483+ return;
55484+}
55485+
55486+#ifdef CONFIG_TASKSTATS
55487+int gr_is_taskstats_denied(int pid)
55488+{
55489+ struct task_struct *task;
55490+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55491+ const struct cred *cred;
55492+#endif
55493+ int ret = 0;
55494+
55495+ /* restrict taskstats viewing to un-chrooted root users
55496+ who have the 'view' subject flag if the RBAC system is enabled
55497+ */
55498+
df50ba0c 55499+ rcu_read_lock();
58c5fc13
MT
55500+ read_lock(&tasklist_lock);
55501+ task = find_task_by_vpid(pid);
55502+ if (task) {
58c5fc13
MT
55503+#ifdef CONFIG_GRKERNSEC_CHROOT
55504+ if (proc_is_chrooted(task))
55505+ ret = -EACCES;
55506+#endif
55507+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55508+ cred = __task_cred(task);
55509+#ifdef CONFIG_GRKERNSEC_PROC_USER
55510+ if (cred->uid != 0)
55511+ ret = -EACCES;
55512+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55513+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55514+ ret = -EACCES;
55515+#endif
55516+#endif
55517+ if (gr_status & GR_READY) {
55518+ if (!(task->acl->mode & GR_VIEW))
55519+ ret = -EACCES;
55520+ }
58c5fc13
MT
55521+ } else
55522+ ret = -ENOENT;
55523+
55524+ read_unlock(&tasklist_lock);
df50ba0c 55525+ rcu_read_unlock();
58c5fc13
MT
55526+
55527+ return ret;
55528+}
55529+#endif
55530+
bc901d79
MT
55531+/* AUXV entries are filled via a descendant of search_binary_handler
55532+ after we've already applied the subject for the target
55533+*/
55534+int gr_acl_enable_at_secure(void)
55535+{
55536+ if (unlikely(!(gr_status & GR_READY)))
55537+ return 0;
55538+
55539+ if (current->acl->mode & GR_ATSECURE)
55540+ return 1;
55541+
55542+ return 0;
55543+}
55544+
58c5fc13
MT
55545+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55546+{
55547+ struct task_struct *task = current;
55548+ struct dentry *dentry = file->f_path.dentry;
55549+ struct vfsmount *mnt = file->f_path.mnt;
55550+ struct acl_object_label *obj, *tmp;
55551+ struct acl_subject_label *subj;
55552+ unsigned int bufsize;
55553+ int is_not_root;
55554+ char *path;
16454cff 55555+ dev_t dev = __get_dev(dentry);
58c5fc13
MT
55556+
55557+ if (unlikely(!(gr_status & GR_READY)))
55558+ return 1;
55559+
55560+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55561+ return 1;
55562+
55563+ /* ignore Eric Biederman */
55564+ if (IS_PRIVATE(dentry->d_inode))
55565+ return 1;
55566+
55567+ subj = task->acl;
55568+ do {
16454cff 55569+ obj = lookup_acl_obj_label(ino, dev, subj);
58c5fc13
MT
55570+ if (obj != NULL)
55571+ return (obj->mode & GR_FIND) ? 1 : 0;
55572+ } while ((subj = subj->parent_subject));
55573+
55574+ /* this is purely an optimization since we're looking for an object
55575+	   for the directory we're doing a readdir on.
55576+	   If it's possible for any globbed object to match the entry we're
55577+ filling into the directory, then the object we find here will be
55578+ an anchor point with attached globbed objects
55579+ */
55580+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55581+ if (obj->globbed == NULL)
55582+ return (obj->mode & GR_FIND) ? 1 : 0;
55583+
55584+ is_not_root = ((obj->filename[0] == '/') &&
55585+ (obj->filename[1] == '\0')) ? 0 : 1;
55586+ bufsize = PAGE_SIZE - namelen - is_not_root;
55587+
55588+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
55589+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55590+ return 1;
55591+
55592+ preempt_disable();
55593+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55594+ bufsize);
55595+
55596+ bufsize = strlen(path);
55597+
55598+ /* if base is "/", don't append an additional slash */
55599+ if (is_not_root)
55600+ *(path + bufsize) = '/';
55601+ memcpy(path + bufsize + is_not_root, name, namelen);
55602+ *(path + bufsize + namelen + is_not_root) = '\0';
55603+
55604+ tmp = obj->globbed;
55605+ while (tmp) {
55606+ if (!glob_match(tmp->filename, path)) {
55607+ preempt_enable();
55608+ return (tmp->mode & GR_FIND) ? 1 : 0;
55609+ }
55610+ tmp = tmp->next;
55611+ }
55612+ preempt_enable();
55613+ return (obj->mode & GR_FIND) ? 1 : 0;
55614+}
55615+
6892158b
MT
55616+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55617+EXPORT_SYMBOL(gr_acl_is_enabled);
55618+#endif
58c5fc13
MT
55619+EXPORT_SYMBOL(gr_learn_resource);
55620+EXPORT_SYMBOL(gr_set_kernel_label);
55621+#ifdef CONFIG_SECURITY
55622+EXPORT_SYMBOL(gr_check_user_change);
55623+EXPORT_SYMBOL(gr_check_group_change);
55624+#endif
55625+
fe2de317
MT
55626diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55627new file mode 100644
55628index 0000000..34fefda
55629--- /dev/null
55630+++ b/grsecurity/gracl_alloc.c
55631@@ -0,0 +1,105 @@
55632+#include <linux/kernel.h>
55633+#include <linux/mm.h>
55634+#include <linux/slab.h>
55635+#include <linux/vmalloc.h>
55636+#include <linux/gracl.h>
55637+#include <linux/grsecurity.h>
55638+
55639+static unsigned long alloc_stack_next = 1;
55640+static unsigned long alloc_stack_size = 1;
55641+static void **alloc_stack;
55642+
55643+static __inline__ int
55644+alloc_pop(void)
55645+{
55646+ if (alloc_stack_next == 1)
55647+ return 0;
55648+
55649+ kfree(alloc_stack[alloc_stack_next - 2]);
55650+
55651+ alloc_stack_next--;
55652+
55653+ return 1;
55654+}
55655+
55656+static __inline__ int
55657+alloc_push(void *buf)
55658+{
55659+ if (alloc_stack_next >= alloc_stack_size)
55660+ return 1;
55661+
55662+ alloc_stack[alloc_stack_next - 1] = buf;
55663+
55664+ alloc_stack_next++;
55665+
55666+ return 0;
55667+}
55668+
55669+void *
55670+acl_alloc(unsigned long len)
55671+{
55672+ void *ret = NULL;
55673+
55674+ if (!len || len > PAGE_SIZE)
55675+ goto out;
55676+
55677+ ret = kmalloc(len, GFP_KERNEL);
55678+
55679+ if (ret) {
55680+ if (alloc_push(ret)) {
55681+ kfree(ret);
55682+ ret = NULL;
55683+ }
55684+ }
55685+
55686+out:
55687+ return ret;
55688+}
55689+
55690+void *
55691+acl_alloc_num(unsigned long num, unsigned long len)
55692+{
55693+ if (!len || (num > (PAGE_SIZE / len)))
55694+ return NULL;
55695+
55696+ return acl_alloc(num * len);
55697+}
55698+
55699+void
55700+acl_free_all(void)
55701+{
55702+ if (gr_acl_is_enabled() || !alloc_stack)
55703+ return;
55704+
55705+ while (alloc_pop()) ;
55706+
55707+ if (alloc_stack) {
55708+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55709+ kfree(alloc_stack);
55710+ else
55711+ vfree(alloc_stack);
55712+ }
55713+
55714+ alloc_stack = NULL;
55715+ alloc_stack_size = 1;
55716+ alloc_stack_next = 1;
55717+
55718+ return;
55719+}
55720+
55721+int
55722+acl_alloc_stack_init(unsigned long size)
55723+{
55724+ if ((size * sizeof (void *)) <= PAGE_SIZE)
55725+ alloc_stack =
55726+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55727+ else
55728+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
55729+
55730+ alloc_stack_size = size;
55731+
55732+ if (!alloc_stack)
55733+ return 0;
55734+ else
55735+ return 1;
55736+}
55737diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55738new file mode 100644
5e856224 55739index 0000000..6d21049
fe2de317
MT
55740--- /dev/null
55741+++ b/grsecurity/gracl_cap.c
5e856224 55742@@ -0,0 +1,110 @@
58c5fc13
MT
55743+#include <linux/kernel.h>
55744+#include <linux/module.h>
55745+#include <linux/sched.h>
55746+#include <linux/gracl.h>
55747+#include <linux/grsecurity.h>
55748+#include <linux/grinternal.h>
55749+
15a11c5b
MT
55750+extern const char *captab_log[];
55751+extern int captab_log_entries;
58c5fc13 55752+
5e856224 55753+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58c5fc13 55754+{
58c5fc13
MT
55755+ struct acl_subject_label *curracl;
55756+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
df50ba0c 55757+ kernel_cap_t cap_audit = __cap_empty_set;
58c5fc13
MT
55758+
55759+ if (!gr_acl_is_enabled())
55760+ return 1;
55761+
55762+ curracl = task->acl;
55763+
55764+ cap_drop = curracl->cap_lower;
55765+ cap_mask = curracl->cap_mask;
df50ba0c 55766+ cap_audit = curracl->cap_invert_audit;
58c5fc13
MT
55767+
55768+ while ((curracl = curracl->parent_subject)) {
55769+ /* if the cap isn't specified in the current computed mask but is specified in the
55770+ current level subject, and is lowered in the current level subject, then add
55771+		   it to the set of dropped capabilities;
55772+ otherwise, add the current level subject's mask to the current computed mask
55773+ */
55774+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55775+ cap_raise(cap_mask, cap);
55776+ if (cap_raised(curracl->cap_lower, cap))
55777+ cap_raise(cap_drop, cap);
df50ba0c
MT
55778+ if (cap_raised(curracl->cap_invert_audit, cap))
55779+ cap_raise(cap_audit, cap);
58c5fc13
MT
55780+ }
55781+ }
55782+
df50ba0c
MT
55783+ if (!cap_raised(cap_drop, cap)) {
55784+ if (cap_raised(cap_audit, cap))
55785+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
58c5fc13 55786+ return 1;
df50ba0c 55787+ }
58c5fc13
MT
55788+
55789+ curracl = task->acl;
55790+
55791+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55792+ && cap_raised(cred->cap_effective, cap)) {
55793+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55794+ task->role->roletype, cred->uid,
55795+ cred->gid, task->exec_file ?
55796+ gr_to_filename(task->exec_file->f_path.dentry,
55797+ task->exec_file->f_path.mnt) : curracl->filename,
55798+ curracl->filename, 0UL,
bc901d79 55799+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
58c5fc13
MT
55800+ return 1;
55801+ }
55802+
15a11c5b 55803+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
58c5fc13 55804+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
5e856224 55805+
58c5fc13
MT
55806+ return 0;
55807+}
55808+
55809+int
5e856224
MT
55810+gr_acl_is_capable(const int cap)
55811+{
55812+ return gr_task_acl_is_capable(current, current_cred(), cap);
55813+}
55814+
55815+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
58c5fc13
MT
55816+{
55817+ struct acl_subject_label *curracl;
55818+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55819+
55820+ if (!gr_acl_is_enabled())
55821+ return 1;
55822+
5e856224 55823+ curracl = task->acl;
58c5fc13
MT
55824+
55825+ cap_drop = curracl->cap_lower;
55826+ cap_mask = curracl->cap_mask;
55827+
55828+ while ((curracl = curracl->parent_subject)) {
55829+ /* if the cap isn't specified in the current computed mask but is specified in the
55830+ current level subject, and is lowered in the current level subject, then add
55831+		   it to the set of dropped capabilities;
55832+ otherwise, add the current level subject's mask to the current computed mask
55833+ */
55834+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55835+ cap_raise(cap_mask, cap);
55836+ if (cap_raised(curracl->cap_lower, cap))
55837+ cap_raise(cap_drop, cap);
55838+ }
55839+ }
55840+
55841+ if (!cap_raised(cap_drop, cap))
55842+ return 1;
55843+
55844+ return 0;
55845+}
55846+
5e856224
MT
55847+int
55848+gr_acl_is_capable_nolog(const int cap)
55849+{
55850+ return gr_task_acl_is_capable_nolog(current, cap);
55851+}
55852+
fe2de317
MT
55853diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55854new file mode 100644
4c928ab7 55855index 0000000..88d0e87
fe2de317
MT
55856--- /dev/null
55857+++ b/grsecurity/gracl_fs.c
4c928ab7 55858@@ -0,0 +1,435 @@
58c5fc13
MT
55859+#include <linux/kernel.h>
55860+#include <linux/sched.h>
55861+#include <linux/types.h>
55862+#include <linux/fs.h>
55863+#include <linux/file.h>
55864+#include <linux/stat.h>
55865+#include <linux/grsecurity.h>
55866+#include <linux/grinternal.h>
55867+#include <linux/gracl.h>
55868+
4c928ab7
MT
55869+umode_t
55870+gr_acl_umask(void)
55871+{
55872+ if (unlikely(!gr_acl_is_enabled()))
55873+ return 0;
55874+
55875+ return current->role->umask;
55876+}
55877+
58c5fc13
MT
55878+__u32
55879+gr_acl_handle_hidden_file(const struct dentry * dentry,
55880+ const struct vfsmount * mnt)
55881+{
55882+ __u32 mode;
55883+
55884+ if (unlikely(!dentry->d_inode))
55885+ return GR_FIND;
55886+
55887+ mode =
55888+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55889+
55890+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55891+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55892+ return mode;
55893+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55894+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55895+ return 0;
55896+ } else if (unlikely(!(mode & GR_FIND)))
55897+ return 0;
55898+
55899+ return GR_FIND;
55900+}
55901+
55902+__u32
55903+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 55904+ int acc_mode)
58c5fc13
MT
55905+{
55906+ __u32 reqmode = GR_FIND;
55907+ __u32 mode;
55908+
55909+ if (unlikely(!dentry->d_inode))
55910+ return reqmode;
55911+
6e9df6a3 55912+ if (acc_mode & MAY_APPEND)
58c5fc13 55913+ reqmode |= GR_APPEND;
6e9df6a3 55914+ else if (acc_mode & MAY_WRITE)
58c5fc13 55915+ reqmode |= GR_WRITE;
6e9df6a3 55916+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
58c5fc13 55917+ reqmode |= GR_READ;
6e9df6a3 55918+
58c5fc13
MT
55919+ mode =
55920+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55921+ mnt);
55922+
55923+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55924+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55925+ reqmode & GR_READ ? " reading" : "",
55926+ reqmode & GR_WRITE ? " writing" : reqmode &
55927+ GR_APPEND ? " appending" : "");
55928+ return reqmode;
55929+ } else
55930+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55931+ {
55932+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55933+ reqmode & GR_READ ? " reading" : "",
55934+ reqmode & GR_WRITE ? " writing" : reqmode &
55935+ GR_APPEND ? " appending" : "");
55936+ return 0;
55937+ } else if (unlikely((mode & reqmode) != reqmode))
55938+ return 0;
55939+
55940+ return reqmode;
55941+}
55942+
55943+__u32
55944+gr_acl_handle_creat(const struct dentry * dentry,
55945+ const struct dentry * p_dentry,
6e9df6a3 55946+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58c5fc13
MT
55947+ const int imode)
55948+{
55949+ __u32 reqmode = GR_WRITE | GR_CREATE;
55950+ __u32 mode;
55951+
6e9df6a3 55952+ if (acc_mode & MAY_APPEND)
58c5fc13 55953+ reqmode |= GR_APPEND;
6e9df6a3
MT
55954+ // if a directory was required or the directory already exists, then
55955+ // don't count this open as a read
55956+ if ((acc_mode & MAY_READ) &&
55957+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
58c5fc13 55958+ reqmode |= GR_READ;
6e9df6a3 55959+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
58c5fc13
MT
55960+ reqmode |= GR_SETID;
55961+
55962+ mode =
55963+ gr_check_create(dentry, p_dentry, p_mnt,
55964+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55965+
55966+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55967+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55968+ reqmode & GR_READ ? " reading" : "",
55969+ reqmode & GR_WRITE ? " writing" : reqmode &
55970+ GR_APPEND ? " appending" : "");
55971+ return reqmode;
55972+ } else
55973+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55974+ {
55975+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55976+ reqmode & GR_READ ? " reading" : "",
55977+ reqmode & GR_WRITE ? " writing" : reqmode &
55978+ GR_APPEND ? " appending" : "");
55979+ return 0;
55980+ } else if (unlikely((mode & reqmode) != reqmode))
55981+ return 0;
55982+
55983+ return reqmode;
55984+}
55985+
55986+__u32
55987+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55988+ const int fmode)
55989+{
55990+ __u32 mode, reqmode = GR_FIND;
55991+
55992+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55993+ reqmode |= GR_EXEC;
55994+ if (fmode & S_IWOTH)
55995+ reqmode |= GR_WRITE;
55996+ if (fmode & S_IROTH)
55997+ reqmode |= GR_READ;
55998+
55999+ mode =
56000+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
56001+ mnt);
56002+
56003+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
56004+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
56005+ reqmode & GR_READ ? " reading" : "",
56006+ reqmode & GR_WRITE ? " writing" : "",
56007+ reqmode & GR_EXEC ? " executing" : "");
56008+ return reqmode;
56009+ } else
56010+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
56011+ {
56012+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
56013+ reqmode & GR_READ ? " reading" : "",
56014+ reqmode & GR_WRITE ? " writing" : "",
56015+ reqmode & GR_EXEC ? " executing" : "");
56016+ return 0;
56017+ } else if (unlikely((mode & reqmode) != reqmode))
56018+ return 0;
56019+
56020+ return reqmode;
56021+}
56022+
56023+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
56024+{
56025+ __u32 mode;
56026+
56027+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
56028+
56029+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56030+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
56031+ return mode;
56032+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56033+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
56034+ return 0;
56035+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
56036+ return 0;
56037+
56038+ return (reqmode);
56039+}
56040+
56041+__u32
56042+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56043+{
56044+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
56045+}
56046+
56047+__u32
56048+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
56049+{
56050+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
56051+}
56052+
56053+__u32
56054+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
56055+{
56056+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
56057+}
56058+
56059+__u32
56060+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
56061+{
56062+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
56063+}
56064+
56065+__u32
4c928ab7
MT
56066+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
56067+ umode_t *modeptr)
58c5fc13 56068+{
4c928ab7
MT
56069+ umode_t mode;
56070+
56071+ *modeptr &= ~gr_acl_umask();
56072+ mode = *modeptr;
56073+
58c5fc13
MT
56074+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
56075+ return 1;
56076+
4c928ab7 56077+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
58c5fc13
MT
56078+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56079+ GR_CHMOD_ACL_MSG);
56080+ } else {
56081+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
56082+ }
56083+}
56084+
56085+__u32
56086+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
56087+{
56088+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
56089+}
56090+
56091+__u32
bc901d79
MT
56092+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
56093+{
56094+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
56095+}
56096+
56097+__u32
58c5fc13
MT
56098+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
56099+{
56100+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
56101+}
56102+
56103+__u32
56104+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
56105+{
56106+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
56107+ GR_UNIXCONNECT_ACL_MSG);
56108+}
56109+
6e9df6a3 56110+/* hardlinks require at minimum create and link permission;
58c5fc13
MT
56111+ any additional privilege required is based on the
56112+ privilege of the file being linked to
56113+*/
56114+__u32
56115+gr_acl_handle_link(const struct dentry * new_dentry,
56116+ const struct dentry * parent_dentry,
56117+ const struct vfsmount * parent_mnt,
56118+ const struct dentry * old_dentry,
56119+ const struct vfsmount * old_mnt, const char *to)
56120+{
56121+ __u32 mode;
56122+ __u32 needmode = GR_CREATE | GR_LINK;
56123+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
56124+
56125+ mode =
56126+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
56127+ old_mnt);
56128+
56129+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
56130+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56131+ return mode;
56132+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56133+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56134+ return 0;
56135+ } else if (unlikely((mode & needmode) != needmode))
56136+ return 0;
56137+
56138+ return 1;
56139+}
56140+
56141+__u32
56142+gr_acl_handle_symlink(const struct dentry * new_dentry,
56143+ const struct dentry * parent_dentry,
56144+ const struct vfsmount * parent_mnt, const char *from)
56145+{
56146+ __u32 needmode = GR_WRITE | GR_CREATE;
56147+ __u32 mode;
56148+
56149+ mode =
56150+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
56151+ GR_CREATE | GR_AUDIT_CREATE |
56152+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
56153+
56154+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
56155+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56156+ return mode;
56157+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56158+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56159+ return 0;
56160+ } else if (unlikely((mode & needmode) != needmode))
56161+ return 0;
56162+
56163+ return (GR_WRITE | GR_CREATE);
56164+}
56165+
56166+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
56167+{
56168+ __u32 mode;
56169+
56170+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56171+
56172+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56173+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
56174+ return mode;
56175+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56176+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
56177+ return 0;
56178+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
56179+ return 0;
56180+
56181+ return (reqmode);
56182+}
56183+
56184+__u32
56185+gr_acl_handle_mknod(const struct dentry * new_dentry,
56186+ const struct dentry * parent_dentry,
56187+ const struct vfsmount * parent_mnt,
56188+ const int mode)
56189+{
56190+ __u32 reqmode = GR_WRITE | GR_CREATE;
56191+ if (unlikely(mode & (S_ISUID | S_ISGID)))
56192+ reqmode |= GR_SETID;
56193+
56194+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56195+ reqmode, GR_MKNOD_ACL_MSG);
56196+}
56197+
56198+__u32
56199+gr_acl_handle_mkdir(const struct dentry *new_dentry,
56200+ const struct dentry *parent_dentry,
56201+ const struct vfsmount *parent_mnt)
56202+{
56203+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56204+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
56205+}
56206+
56207+#define RENAME_CHECK_SUCCESS(old, new) \
56208+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
56209+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
56210+
56211+int
56212+gr_acl_handle_rename(struct dentry *new_dentry,
56213+ struct dentry *parent_dentry,
56214+ const struct vfsmount *parent_mnt,
56215+ struct dentry *old_dentry,
56216+ struct inode *old_parent_inode,
56217+ struct vfsmount *old_mnt, const char *newname)
56218+{
56219+ __u32 comp1, comp2;
56220+ int error = 0;
56221+
56222+ if (unlikely(!gr_acl_is_enabled()))
56223+ return 0;
56224+
56225+ if (!new_dentry->d_inode) {
56226+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
56227+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
56228+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
56229+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
56230+ GR_DELETE | GR_AUDIT_DELETE |
56231+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56232+ GR_SUPPRESS, old_mnt);
56233+ } else {
56234+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
56235+ GR_CREATE | GR_DELETE |
56236+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
56237+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56238+ GR_SUPPRESS, parent_mnt);
56239+ comp2 =
56240+ gr_search_file(old_dentry,
56241+ GR_READ | GR_WRITE | GR_AUDIT_READ |
56242+ GR_DELETE | GR_AUDIT_DELETE |
56243+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
56244+ }
56245+
56246+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
56247+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
56248+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56249+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
56250+ && !(comp2 & GR_SUPPRESS)) {
56251+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56252+ error = -EACCES;
56253+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
56254+ error = -EACCES;
56255+
56256+ return error;
56257+}
56258+
56259+void
56260+gr_acl_handle_exit(void)
56261+{
56262+ u16 id;
56263+ char *rolename;
56264+ struct file *exec_file;
56265+
16454cff
MT
56266+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56267+ !(current->role->roletype & GR_ROLE_PERSIST))) {
58c5fc13
MT
56268+ id = current->acl_role_id;
56269+ rolename = current->role->rolename;
56270+ gr_set_acls(1);
56271+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56272+ }
56273+
56274+ write_lock(&grsec_exec_file_lock);
56275+ exec_file = current->exec_file;
56276+ current->exec_file = NULL;
56277+ write_unlock(&grsec_exec_file_lock);
56278+
56279+ if (exec_file)
56280+ fput(exec_file);
56281+}
56282+
56283+int
56284+gr_acl_handle_procpidmem(const struct task_struct *task)
56285+{
56286+ if (unlikely(!gr_acl_is_enabled()))
56287+ return 0;
56288+
56289+ if (task != current && task->acl->mode & GR_PROTPROCFD)
56290+ return -EACCES;
56291+
56292+ return 0;
56293+}
fe2de317
MT
56294diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56295new file mode 100644
5e856224 56296index 0000000..58800a7
fe2de317
MT
56297--- /dev/null
56298+++ b/grsecurity/gracl_ip.c
5e856224 56299@@ -0,0 +1,384 @@
58c5fc13
MT
56300+#include <linux/kernel.h>
56301+#include <asm/uaccess.h>
56302+#include <asm/errno.h>
56303+#include <net/sock.h>
56304+#include <linux/file.h>
56305+#include <linux/fs.h>
56306+#include <linux/net.h>
56307+#include <linux/in.h>
56308+#include <linux/skbuff.h>
56309+#include <linux/ip.h>
56310+#include <linux/udp.h>
58c5fc13
MT
56311+#include <linux/types.h>
56312+#include <linux/sched.h>
56313+#include <linux/netdevice.h>
56314+#include <linux/inetdevice.h>
56315+#include <linux/gracl.h>
56316+#include <linux/grsecurity.h>
56317+#include <linux/grinternal.h>
56318+
56319+#define GR_BIND 0x01
56320+#define GR_CONNECT 0x02
56321+#define GR_INVERT 0x04
56322+#define GR_BINDOVERRIDE 0x08
56323+#define GR_CONNECTOVERRIDE 0x10
bc901d79 56324+#define GR_SOCK_FAMILY 0x20
58c5fc13 56325+
bc901d79 56326+static const char * gr_protocols[IPPROTO_MAX] = {
58c5fc13
MT
56327+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56328+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56329+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56330+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56331+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56332+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56333+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56334+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56335+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56336+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56337+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56338+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56339+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56340+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56341+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56342+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56343+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56344+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56345+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56346+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56347+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56348+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56349+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56350+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56351+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56352+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56353+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56354+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56355+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56356+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56357+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56358+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56359+ };
56360+
bc901d79 56361+static const char * gr_socktypes[SOCK_MAX] = {
58c5fc13
MT
56362+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56363+ "unknown:7", "unknown:8", "unknown:9", "packet"
56364+ };
56365+
bc901d79
MT
56366+static const char * gr_sockfamilies[AF_MAX+1] = {
56367+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56368+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
c52201e0
MT
56369+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56370+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
bc901d79
MT
56371+ };
56372+
58c5fc13
MT
56373+const char *
56374+gr_proto_to_name(unsigned char proto)
56375+{
56376+ return gr_protocols[proto];
56377+}
56378+
56379+const char *
56380+gr_socktype_to_name(unsigned char type)
56381+{
56382+ return gr_socktypes[type];
56383+}
56384+
bc901d79
MT
56385+const char *
56386+gr_sockfamily_to_name(unsigned char family)
56387+{
56388+ return gr_sockfamilies[family];
56389+}
56390+
58c5fc13
MT
56391+int
56392+gr_search_socket(const int domain, const int type, const int protocol)
56393+{
56394+ struct acl_subject_label *curr;
56395+ const struct cred *cred = current_cred();
56396+
56397+ if (unlikely(!gr_acl_is_enabled()))
56398+ goto exit;
56399+
bc901d79
MT
56400+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
56401+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
58c5fc13
MT
56402+ goto exit; // let the kernel handle it
56403+
56404+ curr = current->acl;
56405+
bc901d79
MT
56406+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56407+	/* the family is allowed; if this is PF_INET, allow it only if
56408+ the extra sock type/protocol checks pass */
56409+ if (domain == PF_INET)
56410+ goto inet_check;
56411+ goto exit;
56412+ } else {
56413+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56414+ __u32 fakeip = 0;
56415+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56416+ current->role->roletype, cred->uid,
56417+ cred->gid, current->exec_file ?
56418+ gr_to_filename(current->exec_file->f_path.dentry,
56419+ current->exec_file->f_path.mnt) :
56420+ curr->filename, curr->filename,
56421+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56422+ &current->signal->saved_ip);
56423+ goto exit;
56424+ }
56425+ goto exit_fail;
56426+ }
56427+
56428+inet_check:
56429+ /* the rest of this checking is for IPv4 only */
58c5fc13
MT
56430+ if (!curr->ips)
56431+ goto exit;
56432+
56433+ if ((curr->ip_type & (1 << type)) &&
56434+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56435+ goto exit;
56436+
56437+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56438+	/* we don't place acls on raw sockets, and sometimes
56439+ dgram/ip sockets are opened for ioctl and not
56440+ bind/connect, so we'll fake a bind learn log */
56441+ if (type == SOCK_RAW || type == SOCK_PACKET) {
56442+ __u32 fakeip = 0;
56443+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56444+ current->role->roletype, cred->uid,
56445+ cred->gid, current->exec_file ?
56446+ gr_to_filename(current->exec_file->f_path.dentry,
56447+ current->exec_file->f_path.mnt) :
56448+ curr->filename, curr->filename,
ae4e228f 56449+ &fakeip, 0, type,
bc901d79 56450+ protocol, GR_CONNECT, &current->signal->saved_ip);
58c5fc13
MT
56451+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56452+ __u32 fakeip = 0;
56453+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56454+ current->role->roletype, cred->uid,
56455+ cred->gid, current->exec_file ?
56456+ gr_to_filename(current->exec_file->f_path.dentry,
56457+ current->exec_file->f_path.mnt) :
56458+ curr->filename, curr->filename,
ae4e228f 56459+ &fakeip, 0, type,
bc901d79 56460+ protocol, GR_BIND, &current->signal->saved_ip);
58c5fc13
MT
56461+ }
56462+ /* we'll log when they use connect or bind */
56463+ goto exit;
56464+ }
56465+
bc901d79
MT
56466+exit_fail:
56467+ if (domain == PF_INET)
56468+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56469+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
56470+ else
56471+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56472+ gr_socktype_to_name(type), protocol);
58c5fc13
MT
56473+
56474+ return 0;
bc901d79 56475+exit:
58c5fc13
MT
56476+ return 1;
56477+}
56478+
56479+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56480+{
56481+ if ((ip->mode & mode) &&
56482+ (ip_port >= ip->low) &&
56483+ (ip_port <= ip->high) &&
56484+ ((ntohl(ip_addr) & our_netmask) ==
56485+ (ntohl(our_addr) & our_netmask))
56486+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56487+ && (ip->type & (1 << type))) {
56488+ if (ip->mode & GR_INVERT)
56489+ return 2; // specifically denied
56490+ else
56491+ return 1; // allowed
56492+ }
56493+
56494+ return 0; // not specifically allowed, may continue parsing
56495+}
56496+
56497+static int
56498+gr_search_connectbind(const int full_mode, struct sock *sk,
56499+ struct sockaddr_in *addr, const int type)
56500+{
56501+ char iface[IFNAMSIZ] = {0};
56502+ struct acl_subject_label *curr;
56503+ struct acl_ip_label *ip;
56504+ struct inet_sock *isk;
56505+ struct net_device *dev;
56506+ struct in_device *idev;
56507+ unsigned long i;
56508+ int ret;
56509+ int mode = full_mode & (GR_BIND | GR_CONNECT);
56510+ __u32 ip_addr = 0;
56511+ __u32 our_addr;
56512+ __u32 our_netmask;
56513+ char *p;
56514+ __u16 ip_port = 0;
56515+ const struct cred *cred = current_cred();
56516+
56517+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56518+ return 0;
56519+
56520+ curr = current->acl;
56521+ isk = inet_sk(sk);
56522+
56523+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56524+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56525+ addr->sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 56526+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58c5fc13
MT
56527+ struct sockaddr_in saddr;
56528+ int err;
56529+
56530+ saddr.sin_family = AF_INET;
56531+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 56532+ saddr.sin_port = isk->inet_sport;
58c5fc13
MT
56533+
56534+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56535+ if (err)
56536+ return err;
56537+
56538+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56539+ if (err)
56540+ return err;
56541+ }
56542+
56543+ if (!curr->ips)
56544+ return 0;
56545+
56546+ ip_addr = addr->sin_addr.s_addr;
56547+ ip_port = ntohs(addr->sin_port);
56548+
56549+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56550+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56551+ current->role->roletype, cred->uid,
56552+ cred->gid, current->exec_file ?
56553+ gr_to_filename(current->exec_file->f_path.dentry,
56554+ current->exec_file->f_path.mnt) :
56555+ curr->filename, curr->filename,
ae4e228f 56556+ &ip_addr, ip_port, type,
bc901d79 56557+ sk->sk_protocol, mode, &current->signal->saved_ip);
58c5fc13
MT
56558+ return 0;
56559+ }
56560+
56561+ for (i = 0; i < curr->ip_num; i++) {
56562+ ip = *(curr->ips + i);
56563+ if (ip->iface != NULL) {
56564+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
56565+ p = strchr(iface, ':');
56566+ if (p != NULL)
56567+ *p = '\0';
56568+ dev = dev_get_by_name(sock_net(sk), iface);
56569+ if (dev == NULL)
56570+ continue;
56571+ idev = in_dev_get(dev);
56572+ if (idev == NULL) {
56573+ dev_put(dev);
56574+ continue;
56575+ }
56576+ rcu_read_lock();
56577+ for_ifa(idev) {
56578+ if (!strcmp(ip->iface, ifa->ifa_label)) {
56579+ our_addr = ifa->ifa_address;
56580+ our_netmask = 0xffffffff;
56581+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56582+ if (ret == 1) {
56583+ rcu_read_unlock();
56584+ in_dev_put(idev);
56585+ dev_put(dev);
56586+ return 0;
56587+ } else if (ret == 2) {
56588+ rcu_read_unlock();
56589+ in_dev_put(idev);
56590+ dev_put(dev);
56591+ goto denied;
56592+ }
56593+ }
56594+ } endfor_ifa(idev);
56595+ rcu_read_unlock();
56596+ in_dev_put(idev);
56597+ dev_put(dev);
56598+ } else {
56599+ our_addr = ip->addr;
56600+ our_netmask = ip->netmask;
56601+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56602+ if (ret == 1)
56603+ return 0;
56604+ else if (ret == 2)
56605+ goto denied;
56606+ }
56607+ }
56608+
56609+denied:
56610+ if (mode == GR_BIND)
ae4e228f 56611+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13 56612+ else if (mode == GR_CONNECT)
ae4e228f 56613+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13
MT
56614+
56615+ return -EACCES;
56616+}
56617+
56618+int
56619+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56620+{
5e856224
MT
56621+ /* always allow disconnection of dgram sockets with connect */
56622+ if (addr->sin_family == AF_UNSPEC)
56623+ return 0;
58c5fc13
MT
56624+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56625+}
56626+
56627+int
56628+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56629+{
56630+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56631+}
56632+
56633+int gr_search_listen(struct socket *sock)
56634+{
56635+ struct sock *sk = sock->sk;
56636+ struct sockaddr_in addr;
56637+
ae4e228f
MT
56638+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56639+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
56640+
56641+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56642+}
56643+
56644+int gr_search_accept(struct socket *sock)
56645+{
56646+ struct sock *sk = sock->sk;
56647+ struct sockaddr_in addr;
56648+
ae4e228f
MT
56649+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56650+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
56651+
56652+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56653+}
56654+
56655+int
56656+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56657+{
56658+ if (addr)
56659+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56660+ else {
56661+ struct sockaddr_in sin;
56662+ const struct inet_sock *inet = inet_sk(sk);
56663+
ae4e228f
MT
56664+ sin.sin_addr.s_addr = inet->inet_daddr;
56665+ sin.sin_port = inet->inet_dport;
58c5fc13
MT
56666+
56667+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56668+ }
56669+}
56670+
56671+int
56672+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56673+{
56674+ struct sockaddr_in sin;
56675+
56676+ if (unlikely(skb->len < sizeof (struct udphdr)))
56677+ return 0; // skip this packet
56678+
56679+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56680+ sin.sin_port = udp_hdr(skb)->source;
56681+
56682+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56683+}
fe2de317
MT
56684diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56685new file mode 100644
56686index 0000000..25f54ef
56687--- /dev/null
56688+++ b/grsecurity/gracl_learn.c
15a11c5b 56689@@ -0,0 +1,207 @@
58c5fc13
MT
56690+#include <linux/kernel.h>
56691+#include <linux/mm.h>
56692+#include <linux/sched.h>
56693+#include <linux/poll.h>
58c5fc13
MT
56694+#include <linux/string.h>
56695+#include <linux/file.h>
56696+#include <linux/types.h>
56697+#include <linux/vmalloc.h>
56698+#include <linux/grinternal.h>
56699+
56700+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56701+ size_t count, loff_t *ppos);
56702+extern int gr_acl_is_enabled(void);
56703+
56704+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56705+static int gr_learn_attached;
56706+
56707+/* use a 512k buffer */
56708+#define LEARN_BUFFER_SIZE (512 * 1024)
56709+
56710+static DEFINE_SPINLOCK(gr_learn_lock);
bc901d79 56711+static DEFINE_MUTEX(gr_learn_user_mutex);
58c5fc13
MT
56712+
56713+/* we need to maintain two buffers, so that the kernel context of grlearn
56714+   uses a mutex around the userspace copying, and the other kernel contexts
56715+ use a spinlock when copying into the buffer, since they cannot sleep
56716+*/
56717+static char *learn_buffer;
56718+static char *learn_buffer_user;
56719+static int learn_buffer_len;
56720+static int learn_buffer_user_len;
56721+
56722+static ssize_t
56723+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56724+{
56725+ DECLARE_WAITQUEUE(wait, current);
56726+ ssize_t retval = 0;
56727+
56728+ add_wait_queue(&learn_wait, &wait);
56729+ set_current_state(TASK_INTERRUPTIBLE);
56730+ do {
bc901d79 56731+ mutex_lock(&gr_learn_user_mutex);
58c5fc13
MT
56732+ spin_lock(&gr_learn_lock);
56733+ if (learn_buffer_len)
56734+ break;
56735+ spin_unlock(&gr_learn_lock);
bc901d79 56736+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
56737+ if (file->f_flags & O_NONBLOCK) {
56738+ retval = -EAGAIN;
56739+ goto out;
56740+ }
56741+ if (signal_pending(current)) {
56742+ retval = -ERESTARTSYS;
56743+ goto out;
56744+ }
56745+
56746+ schedule();
56747+ } while (1);
56748+
56749+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56750+ learn_buffer_user_len = learn_buffer_len;
56751+ retval = learn_buffer_len;
56752+ learn_buffer_len = 0;
56753+
56754+ spin_unlock(&gr_learn_lock);
56755+
56756+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56757+ retval = -EFAULT;
56758+
bc901d79 56759+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
56760+out:
56761+ set_current_state(TASK_RUNNING);
56762+ remove_wait_queue(&learn_wait, &wait);
56763+ return retval;
56764+}
56765+
56766+static unsigned int
56767+poll_learn(struct file * file, poll_table * wait)
56768+{
56769+ poll_wait(file, &learn_wait, wait);
56770+
56771+ if (learn_buffer_len)
56772+ return (POLLIN | POLLRDNORM);
56773+
56774+ return 0;
56775+}
56776+
56777+void
56778+gr_clear_learn_entries(void)
56779+{
56780+ char *tmp;
56781+
bc901d79 56782+ mutex_lock(&gr_learn_user_mutex);
15a11c5b
MT
56783+ spin_lock(&gr_learn_lock);
56784+ tmp = learn_buffer;
56785+ learn_buffer = NULL;
56786+ spin_unlock(&gr_learn_lock);
56787+ if (tmp)
56788+ vfree(tmp);
58c5fc13
MT
56789+ if (learn_buffer_user != NULL) {
56790+ vfree(learn_buffer_user);
56791+ learn_buffer_user = NULL;
56792+ }
56793+ learn_buffer_len = 0;
bc901d79 56794+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
56795+
56796+ return;
56797+}
56798+
56799+void
56800+gr_add_learn_entry(const char *fmt, ...)
56801+{
56802+ va_list args;
56803+ unsigned int len;
56804+
56805+ if (!gr_learn_attached)
56806+ return;
56807+
56808+ spin_lock(&gr_learn_lock);
56809+
56810+ /* leave a gap at the end so we know when it's "full" but don't have to
56811+ compute the exact length of the string we're trying to append
56812+ */
56813+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56814+ spin_unlock(&gr_learn_lock);
56815+ wake_up_interruptible(&learn_wait);
56816+ return;
56817+ }
56818+ if (learn_buffer == NULL) {
56819+ spin_unlock(&gr_learn_lock);
56820+ return;
56821+ }
56822+
56823+ va_start(args, fmt);
56824+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56825+ va_end(args);
56826+
56827+ learn_buffer_len += len + 1;
56828+
56829+ spin_unlock(&gr_learn_lock);
56830+ wake_up_interruptible(&learn_wait);
56831+
56832+ return;
56833+}
56834+
56835+static int
56836+open_learn(struct inode *inode, struct file *file)
56837+{
56838+ if (file->f_mode & FMODE_READ && gr_learn_attached)
56839+ return -EBUSY;
56840+ if (file->f_mode & FMODE_READ) {
56841+ int retval = 0;
bc901d79 56842+ mutex_lock(&gr_learn_user_mutex);
58c5fc13
MT
56843+ if (learn_buffer == NULL)
56844+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56845+ if (learn_buffer_user == NULL)
56846+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56847+ if (learn_buffer == NULL) {
56848+ retval = -ENOMEM;
56849+ goto out_error;
56850+ }
56851+ if (learn_buffer_user == NULL) {
56852+ retval = -ENOMEM;
56853+ goto out_error;
56854+ }
56855+ learn_buffer_len = 0;
56856+ learn_buffer_user_len = 0;
56857+ gr_learn_attached = 1;
56858+out_error:
bc901d79 56859+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
56860+ return retval;
56861+ }
56862+ return 0;
56863+}
56864+
56865+static int
56866+close_learn(struct inode *inode, struct file *file)
56867+{
58c5fc13 56868+ if (file->f_mode & FMODE_READ) {
15a11c5b 56869+ char *tmp = NULL;
bc901d79 56870+ mutex_lock(&gr_learn_user_mutex);
15a11c5b
MT
56871+ spin_lock(&gr_learn_lock);
56872+ tmp = learn_buffer;
56873+ learn_buffer = NULL;
56874+ spin_unlock(&gr_learn_lock);
56875+ if (tmp)
58c5fc13 56876+ vfree(tmp);
58c5fc13
MT
56877+ if (learn_buffer_user != NULL) {
56878+ vfree(learn_buffer_user);
56879+ learn_buffer_user = NULL;
56880+ }
56881+ learn_buffer_len = 0;
56882+ learn_buffer_user_len = 0;
56883+ gr_learn_attached = 0;
bc901d79 56884+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
56885+ }
56886+
56887+ return 0;
56888+}
56889+
56890+const struct file_operations grsec_fops = {
56891+ .read = read_learn,
56892+ .write = write_grsec_handler,
56893+ .open = open_learn,
56894+ .release = close_learn,
56895+ .poll = poll_learn,
56896+};
fe2de317
MT
56897diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56898new file mode 100644
56899index 0000000..39645c9
56900--- /dev/null
56901+++ b/grsecurity/gracl_res.c
df50ba0c 56902@@ -0,0 +1,68 @@
58c5fc13
MT
56903+#include <linux/kernel.h>
56904+#include <linux/sched.h>
56905+#include <linux/gracl.h>
56906+#include <linux/grinternal.h>
56907+
56908+static const char *restab_log[] = {
56909+ [RLIMIT_CPU] = "RLIMIT_CPU",
56910+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56911+ [RLIMIT_DATA] = "RLIMIT_DATA",
56912+ [RLIMIT_STACK] = "RLIMIT_STACK",
56913+ [RLIMIT_CORE] = "RLIMIT_CORE",
56914+ [RLIMIT_RSS] = "RLIMIT_RSS",
56915+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56916+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56917+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56918+ [RLIMIT_AS] = "RLIMIT_AS",
56919+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56920+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56921+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56922+ [RLIMIT_NICE] = "RLIMIT_NICE",
56923+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56924+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56925+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56926+};
56927+
56928+void
56929+gr_log_resource(const struct task_struct *task,
56930+ const int res, const unsigned long wanted, const int gt)
56931+{
ae4e228f 56932+ const struct cred *cred;
df50ba0c 56933+ unsigned long rlim;
56934+
56935+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56936+ return;
56937+
56938+ // not yet supported resource
56939+ if (unlikely(!restab_log[res]))
56940+ return;
56941+
56942+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56943+ rlim = task_rlimit_max(task, res);
56944+ else
56945+ rlim = task_rlimit(task, res);
56946+
56947+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56948+ return;
56949+
56950+ rcu_read_lock();
56951+ cred = __task_cred(task);
56952+
56953+ if (res == RLIMIT_NPROC &&
56954+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56955+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56956+ goto out_rcu_unlock;
56957+ else if (res == RLIMIT_MEMLOCK &&
56958+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56959+ goto out_rcu_unlock;
56960+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56961+ goto out_rcu_unlock;
56962+ rcu_read_unlock();
56963+
df50ba0c 56964+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56965+
56966+ return;
56967+out_rcu_unlock:
56968+ rcu_read_unlock();
56969+ return;
58c5fc13 56970+}
56971diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56972new file mode 100644
56973index 0000000..5556be3
56974--- /dev/null
56975+++ b/grsecurity/gracl_segv.c
66a7e928 56976@@ -0,0 +1,299 @@
56977+#include <linux/kernel.h>
56978+#include <linux/mm.h>
56979+#include <asm/uaccess.h>
56980+#include <asm/errno.h>
56981+#include <asm/mman.h>
56982+#include <net/sock.h>
56983+#include <linux/file.h>
56984+#include <linux/fs.h>
56985+#include <linux/net.h>
56986+#include <linux/in.h>
56987+#include <linux/slab.h>
56988+#include <linux/types.h>
56989+#include <linux/sched.h>
56990+#include <linux/timer.h>
56991+#include <linux/gracl.h>
56992+#include <linux/grsecurity.h>
56993+#include <linux/grinternal.h>
56994+
56995+static struct crash_uid *uid_set;
56996+static unsigned short uid_used;
56997+static DEFINE_SPINLOCK(gr_uid_lock);
56998+extern rwlock_t gr_inode_lock;
56999+extern struct acl_subject_label *
57000+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
57001+ struct acl_role_label *role);
57002+
57003+#ifdef CONFIG_BTRFS_FS
57004+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57005+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57006+#endif
57007+
57008+static inline dev_t __get_dev(const struct dentry *dentry)
57009+{
57010+#ifdef CONFIG_BTRFS_FS
57011+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57012+ return get_btrfs_dev_from_inode(dentry->d_inode);
57013+ else
57014+#endif
57015+ return dentry->d_inode->i_sb->s_dev;
57016+}
57017+
57018+int
57019+gr_init_uidset(void)
57020+{
57021+ uid_set =
57022+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
57023+ uid_used = 0;
57024+
57025+ return uid_set ? 1 : 0;
57026+}
57027+
57028+void
57029+gr_free_uidset(void)
57030+{
57031+ if (uid_set)
57032+ kfree(uid_set);
57033+
57034+ return;
57035+}
57036+
57037+int
57038+gr_find_uid(const uid_t uid)
57039+{
57040+ struct crash_uid *tmp = uid_set;
57041+ uid_t buid;
57042+ int low = 0, high = uid_used - 1, mid;
57043+
57044+ while (high >= low) {
57045+ mid = (low + high) >> 1;
57046+ buid = tmp[mid].uid;
57047+ if (buid == uid)
57048+ return mid;
57049+ if (buid > uid)
57050+ high = mid - 1;
57051+ if (buid < uid)
57052+ low = mid + 1;
57053+ }
57054+
57055+ return -1;
57056+}
57057+
57058+static __inline__ void
57059+gr_insertsort(void)
57060+{
57061+ unsigned short i, j;
57062+ struct crash_uid index;
57063+
57064+ for (i = 1; i < uid_used; i++) {
57065+ index = uid_set[i];
57066+ j = i;
57067+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
57068+ uid_set[j] = uid_set[j - 1];
57069+ j--;
57070+ }
57071+ uid_set[j] = index;
57072+ }
57073+
57074+ return;
57075+}
57076+
57077+static __inline__ void
57078+gr_insert_uid(const uid_t uid, const unsigned long expires)
57079+{
57080+ int loc;
57081+
57082+ if (uid_used == GR_UIDTABLE_MAX)
57083+ return;
57084+
57085+ loc = gr_find_uid(uid);
57086+
57087+ if (loc >= 0) {
57088+ uid_set[loc].expires = expires;
57089+ return;
57090+ }
57091+
57092+ uid_set[uid_used].uid = uid;
57093+ uid_set[uid_used].expires = expires;
57094+ uid_used++;
57095+
57096+ gr_insertsort();
57097+
57098+ return;
57099+}
57100+
57101+void
57102+gr_remove_uid(const unsigned short loc)
57103+{
57104+ unsigned short i;
57105+
57106+ for (i = loc + 1; i < uid_used; i++)
57107+ uid_set[i - 1] = uid_set[i];
57108+
57109+ uid_used--;
57110+
57111+ return;
57112+}
57113+
57114+int
57115+gr_check_crash_uid(const uid_t uid)
57116+{
57117+ int loc;
57118+ int ret = 0;
57119+
57120+ if (unlikely(!gr_acl_is_enabled()))
57121+ return 0;
57122+
57123+ spin_lock(&gr_uid_lock);
57124+ loc = gr_find_uid(uid);
57125+
57126+ if (loc < 0)
57127+ goto out_unlock;
57128+
57129+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
57130+ gr_remove_uid(loc);
57131+ else
57132+ ret = 1;
57133+
57134+out_unlock:
57135+ spin_unlock(&gr_uid_lock);
57136+ return ret;
57137+}
57138+
57139+static __inline__ int
57140+proc_is_setxid(const struct cred *cred)
57141+{
57142+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
57143+ cred->uid != cred->fsuid)
57144+ return 1;
57145+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
57146+ cred->gid != cred->fsgid)
57147+ return 1;
57148+
57149+ return 0;
57150+}
58c5fc13 57151+
71d190be 57152+extern int gr_fake_force_sig(int sig, struct task_struct *t);
57153+
57154+void
57155+gr_handle_crash(struct task_struct *task, const int sig)
57156+{
57157+ struct acl_subject_label *curr;
58c5fc13 57158+ struct task_struct *tsk, *tsk2;
ae4e228f 57159+ const struct cred *cred;
57160+ const struct cred *cred2;
57161+
57162+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
57163+ return;
57164+
57165+ if (unlikely(!gr_acl_is_enabled()))
57166+ return;
57167+
57168+ curr = task->acl;
57169+
57170+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
57171+ return;
57172+
57173+ if (time_before_eq(curr->expires, get_seconds())) {
57174+ curr->expires = 0;
57175+ curr->crashes = 0;
57176+ }
57177+
57178+ curr->crashes++;
57179+
57180+ if (!curr->expires)
57181+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
57182+
57183+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57184+ time_after(curr->expires, get_seconds())) {
57185+ rcu_read_lock();
57186+ cred = __task_cred(task);
57187+ if (cred->uid && proc_is_setxid(cred)) {
57188+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57189+ spin_lock(&gr_uid_lock);
57190+ gr_insert_uid(cred->uid, curr->expires);
57191+ spin_unlock(&gr_uid_lock);
57192+ curr->expires = 0;
57193+ curr->crashes = 0;
57194+ read_lock(&tasklist_lock);
57195+ do_each_thread(tsk2, tsk) {
57196+ cred2 = __task_cred(tsk);
57197+ if (tsk != task && cred2->uid == cred->uid)
57198+ gr_fake_force_sig(SIGKILL, tsk);
57199+ } while_each_thread(tsk2, tsk);
57200+ read_unlock(&tasklist_lock);
57201+ } else {
57202+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57203+ read_lock(&tasklist_lock);
6e9df6a3 57204+ read_lock(&grsec_exec_file_lock);
57205+ do_each_thread(tsk2, tsk) {
57206+ if (likely(tsk != task)) {
57207+ // if this thread has the same subject as the one that triggered
57208+ // RES_CRASH and it's the same binary, kill it
57209+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
57210+ gr_fake_force_sig(SIGKILL, tsk);
57211+ }
57212+ } while_each_thread(tsk2, tsk);
6e9df6a3 57213+ read_unlock(&grsec_exec_file_lock);
57214+ read_unlock(&tasklist_lock);
57215+ }
ae4e228f 57216+ rcu_read_unlock();
57217+ }
57218+
57219+ return;
57220+}
57221+
57222+int
57223+gr_check_crash_exec(const struct file *filp)
57224+{
57225+ struct acl_subject_label *curr;
57226+
57227+ if (unlikely(!gr_acl_is_enabled()))
57228+ return 0;
57229+
57230+ read_lock(&gr_inode_lock);
57231+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
16454cff 57232+ __get_dev(filp->f_path.dentry),
57233+ current->role);
57234+ read_unlock(&gr_inode_lock);
57235+
57236+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
57237+ (!curr->crashes && !curr->expires))
57238+ return 0;
57239+
57240+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57241+ time_after(curr->expires, get_seconds()))
57242+ return 1;
57243+ else if (time_before_eq(curr->expires, get_seconds())) {
57244+ curr->crashes = 0;
57245+ curr->expires = 0;
57246+ }
57247+
57248+ return 0;
57249+}
57250+
57251+void
57252+gr_handle_alertkill(struct task_struct *task)
57253+{
57254+ struct acl_subject_label *curracl;
57255+ __u32 curr_ip;
57256+ struct task_struct *p, *p2;
57257+
57258+ if (unlikely(!gr_acl_is_enabled()))
57259+ return;
57260+
57261+ curracl = task->acl;
57262+ curr_ip = task->signal->curr_ip;
57263+
57264+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57265+ read_lock(&tasklist_lock);
57266+ do_each_thread(p2, p) {
57267+ if (p->signal->curr_ip == curr_ip)
57268+ gr_fake_force_sig(SIGKILL, p);
57269+ } while_each_thread(p2, p);
57270+ read_unlock(&tasklist_lock);
57271+ } else if (curracl->mode & GR_KILLPROC)
57272+ gr_fake_force_sig(SIGKILL, task);
57273+
57274+ return;
57275+}
57276diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57277new file mode 100644
57278index 0000000..9d83a69
57279--- /dev/null
57280+++ b/grsecurity/gracl_shm.c
df50ba0c 57281@@ -0,0 +1,40 @@
57282+#include <linux/kernel.h>
57283+#include <linux/mm.h>
57284+#include <linux/sched.h>
57285+#include <linux/file.h>
57286+#include <linux/ipc.h>
57287+#include <linux/gracl.h>
57288+#include <linux/grsecurity.h>
57289+#include <linux/grinternal.h>
57290+
57291+int
57292+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57293+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57294+{
57295+ struct task_struct *task;
57296+
57297+ if (!gr_acl_is_enabled())
57298+ return 1;
57299+
df50ba0c 57300+ rcu_read_lock();
57301+ read_lock(&tasklist_lock);
57302+
57303+ task = find_task_by_vpid(shm_cprid);
57304+
57305+ if (unlikely(!task))
57306+ task = find_task_by_vpid(shm_lapid);
57307+
57308+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57309+ (task->pid == shm_lapid)) &&
57310+ (task->acl->mode & GR_PROTSHM) &&
57311+ (task->acl != current->acl))) {
57312+ read_unlock(&tasklist_lock);
df50ba0c 57313+ rcu_read_unlock();
57314+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57315+ return 0;
57316+ }
57317+ read_unlock(&tasklist_lock);
df50ba0c 57318+ rcu_read_unlock();
57319+
57320+ return 1;
57321+}
57322diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57323new file mode 100644
57324index 0000000..bc0be01
57325--- /dev/null
57326+++ b/grsecurity/grsec_chdir.c
57327@@ -0,0 +1,19 @@
57328+#include <linux/kernel.h>
57329+#include <linux/sched.h>
57330+#include <linux/fs.h>
57331+#include <linux/file.h>
57332+#include <linux/grsecurity.h>
57333+#include <linux/grinternal.h>
57334+
57335+void
57336+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57337+{
57338+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57339+ if ((grsec_enable_chdir && grsec_enable_group &&
57340+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57341+ !grsec_enable_group)) {
57342+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57343+ }
57344+#endif
57345+ return;
57346+}
57347diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57348new file mode 100644
5e856224 57349index 0000000..9807ee2
57350--- /dev/null
57351+++ b/grsecurity/grsec_chroot.c
5e856224 57352@@ -0,0 +1,368 @@
57353+#include <linux/kernel.h>
57354+#include <linux/module.h>
57355+#include <linux/sched.h>
57356+#include <linux/file.h>
57357+#include <linux/fs.h>
57358+#include <linux/mount.h>
57359+#include <linux/types.h>
5e856224 57360+#include "../fs/mount.h"
57361+#include <linux/grsecurity.h>
57362+#include <linux/grinternal.h>
57363+
57364+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57365+{
57366+#ifdef CONFIG_GRKERNSEC
57367+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
5e856224 57368+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
57369+ task->gr_is_chrooted = 1;
57370+ else
57371+ task->gr_is_chrooted = 0;
57372+
57373+ task->gr_chroot_dentry = path->dentry;
57374+#endif
57375+ return;
57376+}
57377+
57378+void gr_clear_chroot_entries(struct task_struct *task)
57379+{
57380+#ifdef CONFIG_GRKERNSEC
57381+ task->gr_is_chrooted = 0;
57382+ task->gr_chroot_dentry = NULL;
57383+#endif
57384+ return;
57385+}
57386+
58c5fc13 57387+int
15a11c5b 57388+gr_handle_chroot_unix(const pid_t pid)
57389+{
57390+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
6892158b 57391+ struct task_struct *p;
57392+
57393+ if (unlikely(!grsec_enable_chroot_unix))
57394+ return 1;
57395+
57396+ if (likely(!proc_is_chrooted(current)))
57397+ return 1;
57398+
df50ba0c 57399+ rcu_read_lock();
58c5fc13 57400+ read_lock(&tasklist_lock);
15a11c5b 57401+ p = find_task_by_vpid_unrestricted(pid);
71d190be 57402+ if (unlikely(p && !have_same_root(current, p))) {
57403+ read_unlock(&tasklist_lock);
57404+ rcu_read_unlock();
57405+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57406+ return 0;
57407+ }
57408+ read_unlock(&tasklist_lock);
df50ba0c 57409+ rcu_read_unlock();
57410+#endif
57411+ return 1;
57412+}
57413+
57414+int
57415+gr_handle_chroot_nice(void)
57416+{
57417+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57418+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57419+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57420+ return -EPERM;
57421+ }
57422+#endif
57423+ return 0;
57424+}
57425+
57426+int
57427+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57428+{
57429+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57430+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57431+ && proc_is_chrooted(current)) {
57432+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57433+ return -EACCES;
57434+ }
57435+#endif
57436+ return 0;
57437+}
57438+
57439+int
57440+gr_handle_chroot_rawio(const struct inode *inode)
57441+{
57442+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57443+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57444+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57445+ return 1;
57446+#endif
57447+ return 0;
57448+}
57449+
 57450+int
57451+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57452+{
57453+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57454+ struct task_struct *p;
57455+ int ret = 0;
57456+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57457+ return ret;
57458+
57459+ read_lock(&tasklist_lock);
57460+ do_each_pid_task(pid, type, p) {
57461+ if (!have_same_root(current, p)) {
57462+ ret = 1;
57463+ goto out;
57464+ }
57465+ } while_each_pid_task(pid, type, p);
57466+out:
57467+ read_unlock(&tasklist_lock);
57468+ return ret;
57469+#endif
57470+ return 0;
57471+}
57472+
57473+int
57474+gr_pid_is_chrooted(struct task_struct *p)
57475+{
57476+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57477+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57478+ return 0;
57479+
57480+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57481+ !have_same_root(current, p)) {
57482+ return 1;
57483+ }
57484+#endif
57485+ return 0;
57486+}
57487+
57488+EXPORT_SYMBOL(gr_pid_is_chrooted);
57489+
57490+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57491+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57492+{
57493+ struct path path, currentroot;
57494+ int ret = 0;
58c5fc13 57495+
57496+ path.dentry = (struct dentry *)u_dentry;
57497+ path.mnt = (struct vfsmount *)u_mnt;
6892158b 57498+ get_fs_root(current->fs, &currentroot);
57499+ if (path_is_under(&path, &currentroot))
57500+ ret = 1;
6892158b 57501+ path_put(&currentroot);
58c5fc13 57502+
57503+ return ret;
57504+}
57505+#endif
57506+
57507+int
57508+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57509+{
57510+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57511+ if (!grsec_enable_chroot_fchdir)
57512+ return 1;
57513+
57514+ if (!proc_is_chrooted(current))
57515+ return 1;
57516+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57517+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57518+ return 0;
57519+ }
57520+#endif
57521+ return 1;
57522+}
57523+
57524+int
57525+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57526+ const time_t shm_createtime)
57527+{
57528+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
15a11c5b 57529+ struct task_struct *p;
57530+ time_t starttime;
57531+
57532+ if (unlikely(!grsec_enable_chroot_shmat))
57533+ return 1;
57534+
57535+ if (likely(!proc_is_chrooted(current)))
57536+ return 1;
57537+
df50ba0c 57538+ rcu_read_lock();
57539+ read_lock(&tasklist_lock);
57540+
15a11c5b 57541+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
58c5fc13 57542+ starttime = p->start_time.tv_sec;
57543+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57544+ if (have_same_root(current, p)) {
57545+ goto allow;
57546+ } else {
58c5fc13 57547+ read_unlock(&tasklist_lock);
df50ba0c 57548+ rcu_read_unlock();
57549+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57550+ return 0;
57551+ }
58c5fc13 57552+ }
15a11c5b 57553+ /* creator exited, pid reuse, fall through to next check */
58c5fc13 57554+ }
57555+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57556+ if (unlikely(!have_same_root(current, p))) {
57557+ read_unlock(&tasklist_lock);
57558+ rcu_read_unlock();
57559+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57560+ return 0;
57561+ }
57562+ }
57563+
57564+allow:
58c5fc13 57565+ read_unlock(&tasklist_lock);
df50ba0c 57566+ rcu_read_unlock();
57567+#endif
57568+ return 1;
57569+}
57570+
57571+void
57572+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57573+{
57574+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57575+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57576+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57577+#endif
57578+ return;
57579+}
57580+
57581+int
57582+gr_handle_chroot_mknod(const struct dentry *dentry,
57583+ const struct vfsmount *mnt, const int mode)
57584+{
57585+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57586+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57587+ proc_is_chrooted(current)) {
57588+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57589+ return -EPERM;
57590+ }
57591+#endif
57592+ return 0;
57593+}
57594+
57595+int
57596+gr_handle_chroot_mount(const struct dentry *dentry,
57597+ const struct vfsmount *mnt, const char *dev_name)
57598+{
57599+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57600+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
15a11c5b 57601+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57602+ return -EPERM;
57603+ }
57604+#endif
57605+ return 0;
57606+}
57607+
57608+int
57609+gr_handle_chroot_pivot(void)
57610+{
57611+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57612+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57613+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57614+ return -EPERM;
57615+ }
57616+#endif
57617+ return 0;
57618+}
57619+
57620+int
57621+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57622+{
57623+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57624+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57625+ !gr_is_outside_chroot(dentry, mnt)) {
57626+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57627+ return -EPERM;
57628+ }
57629+#endif
57630+ return 0;
57631+}
57632+
57633+extern const char *captab_log[];
57634+extern int captab_log_entries;
57635+
58c5fc13 57636+int
5e856224 57637+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57638+{
57639+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
5e856224 57640+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
58c5fc13 57641+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
15a11c5b 57642+ if (cap_raised(chroot_caps, cap)) {
57643+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57644+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57645+ }
57646+ return 0;
57647+ }
57648+ }
57649+#endif
57650+ return 1;
57651+}
58c5fc13 57652+
15a11c5b 57653+int
5e856224 57654+gr_chroot_is_capable(const int cap)
57655+{
57656+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57657+ return gr_task_chroot_is_capable(current, current_cred(), cap);
57658+#endif
57659+ return 1;
57660+}
57661+
57662+int
57663+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57664+{
57665+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57666+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57667+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57668+ if (cap_raised(chroot_caps, cap)) {
57669+ return 0;
57670+ }
57671+ }
57672+#endif
15a11c5b 57673+ return 1;
57674+}
57675+
57676+int
57677+gr_chroot_is_capable_nolog(const int cap)
57678+{
57679+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57680+ return gr_task_chroot_is_capable_nolog(current, cap);
57681+#endif
57682+ return 1;
57683+}
57684+
57685+int
57686+gr_handle_chroot_sysctl(const int op)
57687+{
57688+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57689+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57690+ proc_is_chrooted(current))
57691+ return -EACCES;
57692+#endif
57693+ return 0;
57694+}
57695+
57696+void
57697+gr_handle_chroot_chdir(struct path *path)
57698+{
57699+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57700+ if (grsec_enable_chroot_chdir)
57701+ set_fs_pwd(current->fs, path);
57702+#endif
57703+ return;
57704+}
57705+
57706+int
57707+gr_handle_chroot_chmod(const struct dentry *dentry,
57708+ const struct vfsmount *mnt, const int mode)
57709+{
57710+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57711+ /* allow chmod +s on directories, but not files */
57712+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57713+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57714+ proc_is_chrooted(current)) {
57715+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57716+ return -EPERM;
57717+ }
57718+#endif
57719+ return 0;
57720+}
57721diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57722new file mode 100644
4c928ab7 57723index 0000000..213ad8b
57724--- /dev/null
57725+++ b/grsecurity/grsec_disabled.c
4c928ab7 57726@@ -0,0 +1,437 @@
57727+#include <linux/kernel.h>
57728+#include <linux/module.h>
57729+#include <linux/sched.h>
57730+#include <linux/file.h>
57731+#include <linux/fs.h>
57732+#include <linux/kdev_t.h>
57733+#include <linux/net.h>
57734+#include <linux/in.h>
57735+#include <linux/ip.h>
57736+#include <linux/skbuff.h>
57737+#include <linux/sysctl.h>
57738+
57739+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57740+void
57741+pax_set_initial_flags(struct linux_binprm *bprm)
57742+{
57743+ return;
57744+}
57745+#endif
57746+
57747+#ifdef CONFIG_SYSCTL
57748+__u32
57749+gr_handle_sysctl(const struct ctl_table * table, const int op)
57750+{
57751+ return 0;
57752+}
57753+#endif
57754+
57755+#ifdef CONFIG_TASKSTATS
57756+int gr_is_taskstats_denied(int pid)
57757+{
57758+ return 0;
57759+}
57760+#endif
57761+
57762+int
57763+gr_acl_is_enabled(void)
57764+{
57765+ return 0;
57766+}
57767+
57768+void
57769+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57770+{
57771+ return;
57772+}
57773+
57774+int
57775+gr_handle_rawio(const struct inode *inode)
57776+{
57777+ return 0;
57778+}
57779+
57780+void
57781+gr_acl_handle_psacct(struct task_struct *task, const long code)
57782+{
57783+ return;
57784+}
57785+
57786+int
57787+gr_handle_ptrace(struct task_struct *task, const long request)
57788+{
57789+ return 0;
57790+}
57791+
57792+int
57793+gr_handle_proc_ptrace(struct task_struct *task)
57794+{
57795+ return 0;
57796+}
57797+
57798+void
57799+gr_learn_resource(const struct task_struct *task,
57800+ const int res, const unsigned long wanted, const int gt)
57801+{
57802+ return;
57803+}
57804+
57805+int
57806+gr_set_acls(const int type)
57807+{
57808+ return 0;
57809+}
57810+
57811+int
57812+gr_check_hidden_task(const struct task_struct *tsk)
57813+{
57814+ return 0;
57815+}
57816+
57817+int
57818+gr_check_protected_task(const struct task_struct *task)
57819+{
57820+ return 0;
57821+}
57822+
57823+int
57824+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57825+{
57826+ return 0;
57827+}
57828+
57829+void
57830+gr_copy_label(struct task_struct *tsk)
57831+{
57832+ return;
57833+}
57834+
57835+void
57836+gr_set_pax_flags(struct task_struct *task)
57837+{
57838+ return;
57839+}
57840+
57841+int
57842+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57843+ const int unsafe_share)
57844+{
57845+ return 0;
57846+}
57847+
57848+void
57849+gr_handle_delete(const ino_t ino, const dev_t dev)
57850+{
57851+ return;
57852+}
57853+
57854+void
57855+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57856+{
57857+ return;
57858+}
57859+
57860+void
57861+gr_handle_crash(struct task_struct *task, const int sig)
57862+{
57863+ return;
57864+}
57865+
57866+int
57867+gr_check_crash_exec(const struct file *filp)
57868+{
57869+ return 0;
57870+}
57871+
57872+int
57873+gr_check_crash_uid(const uid_t uid)
57874+{
57875+ return 0;
57876+}
57877+
57878+void
57879+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57880+ struct dentry *old_dentry,
57881+ struct dentry *new_dentry,
57882+ struct vfsmount *mnt, const __u8 replace)
57883+{
57884+ return;
57885+}
57886+
57887+int
57888+gr_search_socket(const int family, const int type, const int protocol)
57889+{
57890+ return 1;
57891+}
57892+
57893+int
57894+gr_search_connectbind(const int mode, const struct socket *sock,
57895+ const struct sockaddr_in *addr)
57896+{
57897+ return 0;
57898+}
57899+
57900+void
57901+gr_handle_alertkill(struct task_struct *task)
57902+{
57903+ return;
57904+}
57905+
57906+__u32
57907+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57908+{
57909+ return 1;
57910+}
57911+
57912+__u32
57913+gr_acl_handle_hidden_file(const struct dentry * dentry,
57914+ const struct vfsmount * mnt)
57915+{
57916+ return 1;
57917+}
57918+
57919+__u32
57920+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 57921+ int acc_mode)
57922+{
57923+ return 1;
57924+}
57925+
57926+__u32
57927+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57928+{
57929+ return 1;
57930+}
57931+
57932+__u32
57933+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57934+{
57935+ return 1;
57936+}
57937+
57938+int
57939+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57940+ unsigned int *vm_flags)
57941+{
57942+ return 1;
57943+}
57944+
57945+__u32
57946+gr_acl_handle_truncate(const struct dentry * dentry,
57947+ const struct vfsmount * mnt)
57948+{
57949+ return 1;
57950+}
57951+
57952+__u32
57953+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57954+{
57955+ return 1;
57956+}
57957+
57958+__u32
57959+gr_acl_handle_access(const struct dentry * dentry,
57960+ const struct vfsmount * mnt, const int fmode)
57961+{
57962+ return 1;
57963+}
57964+
57965+__u32
58c5fc13 57966+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
4c928ab7 57967+ umode_t *mode)
57968+{
57969+ return 1;
57970+}
57971+
57972+__u32
57973+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57974+{
57975+ return 1;
57976+}
57977+
57978+__u32
57979+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57980+{
57981+ return 1;
57982+}
57983+
57984+void
57985+grsecurity_init(void)
57986+{
57987+ return;
57988+}
57989+
57990+umode_t gr_acl_umask(void)
57991+{
57992+ return 0;
57993+}
57994+
57995+__u32
57996+gr_acl_handle_mknod(const struct dentry * new_dentry,
57997+ const struct dentry * parent_dentry,
57998+ const struct vfsmount * parent_mnt,
57999+ const int mode)
58000+{
58001+ return 1;
58002+}
58003+
58004+__u32
58005+gr_acl_handle_mkdir(const struct dentry * new_dentry,
58006+ const struct dentry * parent_dentry,
58007+ const struct vfsmount * parent_mnt)
58008+{
58009+ return 1;
58010+}
58011+
58012+__u32
58013+gr_acl_handle_symlink(const struct dentry * new_dentry,
58014+ const struct dentry * parent_dentry,
58015+ const struct vfsmount * parent_mnt, const char *from)
58016+{
58017+ return 1;
58018+}
58019+
58020+__u32
58021+gr_acl_handle_link(const struct dentry * new_dentry,
58022+ const struct dentry * parent_dentry,
58023+ const struct vfsmount * parent_mnt,
58024+ const struct dentry * old_dentry,
58025+ const struct vfsmount * old_mnt, const char *to)
58026+{
58027+ return 1;
58028+}
58029+
58030+int
58031+gr_acl_handle_rename(const struct dentry *new_dentry,
58032+ const struct dentry *parent_dentry,
58033+ const struct vfsmount *parent_mnt,
58034+ const struct dentry *old_dentry,
58035+ const struct inode *old_parent_inode,
58036+ const struct vfsmount *old_mnt, const char *newname)
58037+{
58038+ return 0;
58039+}
58040+
58041+int
58042+gr_acl_handle_filldir(const struct file *file, const char *name,
58043+ const int namelen, const ino_t ino)
58044+{
58045+ return 1;
58046+}
58047+
58048+int
58049+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58050+ const time_t shm_createtime, const uid_t cuid, const int shmid)
58051+{
58052+ return 1;
58053+}
58054+
58055+int
58056+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
58057+{
58058+ return 0;
58059+}
58060+
58061+int
58062+gr_search_accept(const struct socket *sock)
58063+{
58064+ return 0;
58065+}
58066+
58067+int
58068+gr_search_listen(const struct socket *sock)
58069+{
58070+ return 0;
58071+}
58072+
58073+int
58074+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
58075+{
58076+ return 0;
58077+}
58078+
58079+__u32
58080+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
58081+{
58082+ return 1;
58083+}
58084+
58085+__u32
58086+gr_acl_handle_creat(const struct dentry * dentry,
58087+ const struct dentry * p_dentry,
6e9df6a3 58088+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58089+ const int imode)
58090+{
58091+ return 1;
58092+}
58093+
58094+void
58095+gr_acl_handle_exit(void)
58096+{
58097+ return;
58098+}
58099+
58100+int
58101+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58102+{
58103+ return 1;
58104+}
58105+
58106+void
58107+gr_set_role_label(const uid_t uid, const gid_t gid)
58108+{
58109+ return;
58110+}
58111+
58112+int
58113+gr_acl_handle_procpidmem(const struct task_struct *task)
58114+{
58115+ return 0;
58116+}
58117+
58118+int
58119+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
58120+{
58121+ return 0;
58122+}
58123+
58124+int
58125+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
58126+{
58127+ return 0;
58128+}
58129+
58130+void
58131+gr_set_kernel_label(struct task_struct *task)
58132+{
58133+ return;
58134+}
58135+
58136+int
58137+gr_check_user_change(int real, int effective, int fs)
58138+{
58139+ return 0;
58140+}
58141+
58142+int
58143+gr_check_group_change(int real, int effective, int fs)
58144+{
58145+ return 0;
58146+}
58147+
58148+int gr_acl_enable_at_secure(void)
58149+{
58150+ return 0;
58151+}
58152+
58153+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58154+{
58155+ return dentry->d_inode->i_sb->s_dev;
58156+}
58157+
58158+EXPORT_SYMBOL(gr_learn_resource);
58159+EXPORT_SYMBOL(gr_set_kernel_label);
58160+#ifdef CONFIG_SECURITY
58161+EXPORT_SYMBOL(gr_check_user_change);
58162+EXPORT_SYMBOL(gr_check_group_change);
58163+#endif
58164diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
58165new file mode 100644
5e856224 58166index 0000000..abfa971
58167--- /dev/null
58168+++ b/grsecurity/grsec_exec.c
5e856224 58169@@ -0,0 +1,174 @@
58170+#include <linux/kernel.h>
58171+#include <linux/sched.h>
58172+#include <linux/file.h>
58173+#include <linux/binfmts.h>
58174+#include <linux/fs.h>
58175+#include <linux/types.h>
58176+#include <linux/grdefs.h>
15a11c5b 58177+#include <linux/grsecurity.h>
58178+#include <linux/grinternal.h>
58179+#include <linux/capability.h>
15a11c5b 58180+#include <linux/module.h>
58181+
58182+#include <asm/uaccess.h>
58183+
58184+#ifdef CONFIG_GRKERNSEC_EXECLOG
58185+static char gr_exec_arg_buf[132];
bc901d79 58186+static DEFINE_MUTEX(gr_exec_arg_mutex);
58187+#endif
58188+
15a11c5b 58189+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58190+
58191+void
15a11c5b 58192+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58193+{
58194+#ifdef CONFIG_GRKERNSEC_EXECLOG
58195+ char *grarg = gr_exec_arg_buf;
58196+ unsigned int i, x, execlen = 0;
58197+ char c;
58198+
58199+ if (!((grsec_enable_execlog && grsec_enable_group &&
58200+ in_group_p(grsec_audit_gid))
58201+ || (grsec_enable_execlog && !grsec_enable_group)))
58202+ return;
58203+
bc901d79 58204+ mutex_lock(&gr_exec_arg_mutex);
58205+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
58206+
58207+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
58208+ const char __user *p;
58209+ unsigned int len;
58210+
58211+ p = get_user_arg_ptr(argv, i);
58212+ if (IS_ERR(p))
58c5fc13 58213+ goto log;
15a11c5b 58214+
58215+ len = strnlen_user(p, 128 - execlen);
58216+ if (len > 128 - execlen)
58217+ len = 128 - execlen;
58218+ else if (len > 0)
58219+ len--;
58220+ if (copy_from_user(grarg + execlen, p, len))
58221+ goto log;
58222+
58223+ /* rewrite unprintable characters */
58224+ for (x = 0; x < len; x++) {
58225+ c = *(grarg + execlen + x);
58226+ if (c < 32 || c > 126)
58227+ *(grarg + execlen + x) = ' ';
58228+ }
58229+
58230+ execlen += len;
58231+ *(grarg + execlen) = ' ';
58232+ *(grarg + execlen + 1) = '\0';
58233+ execlen++;
58234+ }
58235+
58236+ log:
58237+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
58238+ bprm->file->f_path.mnt, grarg);
bc901d79 58239+ mutex_unlock(&gr_exec_arg_mutex);
58240+#endif
58241+ return;
58242+}
bc901d79 58243+
58244+#ifdef CONFIG_GRKERNSEC
58245+extern int gr_acl_is_capable(const int cap);
58246+extern int gr_acl_is_capable_nolog(const int cap);
58247+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58248+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
58249+extern int gr_chroot_is_capable(const int cap);
58250+extern int gr_chroot_is_capable_nolog(const int cap);
58251+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58252+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
15a11c5b 58253+#endif
bc901d79 58254+
58255+const char *captab_log[] = {
58256+ "CAP_CHOWN",
58257+ "CAP_DAC_OVERRIDE",
58258+ "CAP_DAC_READ_SEARCH",
58259+ "CAP_FOWNER",
58260+ "CAP_FSETID",
58261+ "CAP_KILL",
58262+ "CAP_SETGID",
58263+ "CAP_SETUID",
58264+ "CAP_SETPCAP",
58265+ "CAP_LINUX_IMMUTABLE",
58266+ "CAP_NET_BIND_SERVICE",
58267+ "CAP_NET_BROADCAST",
58268+ "CAP_NET_ADMIN",
58269+ "CAP_NET_RAW",
58270+ "CAP_IPC_LOCK",
58271+ "CAP_IPC_OWNER",
58272+ "CAP_SYS_MODULE",
58273+ "CAP_SYS_RAWIO",
58274+ "CAP_SYS_CHROOT",
58275+ "CAP_SYS_PTRACE",
58276+ "CAP_SYS_PACCT",
58277+ "CAP_SYS_ADMIN",
58278+ "CAP_SYS_BOOT",
58279+ "CAP_SYS_NICE",
58280+ "CAP_SYS_RESOURCE",
58281+ "CAP_SYS_TIME",
58282+ "CAP_SYS_TTY_CONFIG",
58283+ "CAP_MKNOD",
58284+ "CAP_LEASE",
58285+ "CAP_AUDIT_WRITE",
58286+ "CAP_AUDIT_CONTROL",
58287+ "CAP_SETFCAP",
58288+ "CAP_MAC_OVERRIDE",
58289+ "CAP_MAC_ADMIN",
6e9df6a3
MT
58290+ "CAP_SYSLOG",
58291+ "CAP_WAKE_ALARM"
15a11c5b 58292+};
bc901d79 58293+
15a11c5b 58294+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
bc901d79 58295+
58296+int gr_is_capable(const int cap)
58297+{
58298+#ifdef CONFIG_GRKERNSEC
58299+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58300+ return 1;
58301+ return 0;
58302+#else
58303+ return 1;
bc901d79 58304+#endif
bc901d79 58305+}
15a11c5b 58306+
58307+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58308+{
58309+#ifdef CONFIG_GRKERNSEC
58310+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
58311+ return 1;
58312+ return 0;
58313+#else
58314+ return 1;
58315+#endif
58316+}
58317+
58318+int gr_is_capable_nolog(const int cap)
58319+{
58320+#ifdef CONFIG_GRKERNSEC
58321+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58322+ return 1;
58323+ return 0;
58324+#else
58325+ return 1;
bc901d79 58326+#endif
58327+}
58328+
58329+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
58330+{
58331+#ifdef CONFIG_GRKERNSEC
58332+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
58333+ return 1;
58334+ return 0;
58335+#else
58336+ return 1;
58337+#endif
58338+}
58339+
58340+EXPORT_SYMBOL(gr_is_capable);
58341+EXPORT_SYMBOL(gr_is_capable_nolog);
58342+EXPORT_SYMBOL(gr_task_is_capable);
58343+EXPORT_SYMBOL(gr_task_is_capable_nolog);
58344diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58345new file mode 100644
58346index 0000000..d3ee748
58347--- /dev/null
58348+++ b/grsecurity/grsec_fifo.c
58349@@ -0,0 +1,24 @@
58350+#include <linux/kernel.h>
58351+#include <linux/sched.h>
58352+#include <linux/fs.h>
58353+#include <linux/file.h>
58354+#include <linux/grinternal.h>
58355+
58356+int
58357+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58358+ const struct dentry *dir, const int flag, const int acc_mode)
58359+{
58360+#ifdef CONFIG_GRKERNSEC_FIFO
58361+ const struct cred *cred = current_cred();
58362+
58363+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58364+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58365+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58366+ (cred->fsuid != dentry->d_inode->i_uid)) {
16454cff 58367+ if (!inode_permission(dentry->d_inode, acc_mode))
58368+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58369+ return -EACCES;
58370+ }
58371+#endif
58372+ return 0;
58373+}
58374diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58375new file mode 100644
58376index 0000000..8ca18bf
58377--- /dev/null
58378+++ b/grsecurity/grsec_fork.c
6892158b 58379@@ -0,0 +1,23 @@
58380+#include <linux/kernel.h>
58381+#include <linux/sched.h>
58382+#include <linux/grsecurity.h>
58383+#include <linux/grinternal.h>
58384+#include <linux/errno.h>
58385+
58386+void
58387+gr_log_forkfail(const int retval)
58388+{
58389+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58390+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58391+ switch (retval) {
58392+ case -EAGAIN:
58393+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58394+ break;
58395+ case -ENOMEM:
58396+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58397+ break;
58398+ }
58399+ }
58400+#endif
58401+ return;
58402+}
58403diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58404new file mode 100644
4c928ab7 58405index 0000000..01ddde4
58406--- /dev/null
58407+++ b/grsecurity/grsec_init.c
4c928ab7 58408@@ -0,0 +1,277 @@
58409+#include <linux/kernel.h>
58410+#include <linux/sched.h>
58411+#include <linux/mm.h>
58412+#include <linux/gracl.h>
58413+#include <linux/slab.h>
58414+#include <linux/vmalloc.h>
58415+#include <linux/percpu.h>
df50ba0c 58416+#include <linux/module.h>
58c5fc13 58417+
58418+int grsec_enable_ptrace_readexec;
58419+int grsec_enable_setxid;
15a11c5b 58420+int grsec_enable_brute;
58421+int grsec_enable_link;
58422+int grsec_enable_dmesg;
58423+int grsec_enable_harden_ptrace;
58424+int grsec_enable_fifo;
58425+int grsec_enable_execlog;
58426+int grsec_enable_signal;
58427+int grsec_enable_forkfail;
ae4e228f 58428+int grsec_enable_audit_ptrace;
58429+int grsec_enable_time;
58430+int grsec_enable_audit_textrel;
58431+int grsec_enable_group;
58432+int grsec_audit_gid;
58433+int grsec_enable_chdir;
58434+int grsec_enable_mount;
ae4e228f 58435+int grsec_enable_rofs;
58436+int grsec_enable_chroot_findtask;
58437+int grsec_enable_chroot_mount;
58438+int grsec_enable_chroot_shmat;
58439+int grsec_enable_chroot_fchdir;
58440+int grsec_enable_chroot_double;
58441+int grsec_enable_chroot_pivot;
58442+int grsec_enable_chroot_chdir;
58443+int grsec_enable_chroot_chmod;
58444+int grsec_enable_chroot_mknod;
58445+int grsec_enable_chroot_nice;
58446+int grsec_enable_chroot_execlog;
58447+int grsec_enable_chroot_caps;
58448+int grsec_enable_chroot_sysctl;
58449+int grsec_enable_chroot_unix;
58450+int grsec_enable_tpe;
58451+int grsec_tpe_gid;
ae4e228f 58452+int grsec_enable_blackhole;
58453+#ifdef CONFIG_IPV6_MODULE
58454+EXPORT_SYMBOL(grsec_enable_blackhole);
58455+#endif
ae4e228f 58456+int grsec_lastack_retries;
58c5fc13 58457+int grsec_enable_tpe_all;
57199397 58458+int grsec_enable_tpe_invert;
58459+int grsec_enable_socket_all;
58460+int grsec_socket_all_gid;
58461+int grsec_enable_socket_client;
58462+int grsec_socket_client_gid;
58463+int grsec_enable_socket_server;
58464+int grsec_socket_server_gid;
58465+int grsec_resource_logging;
df50ba0c 58466+int grsec_disable_privio;
6892158b 58467+int grsec_enable_log_rwxmaps;
58468+int grsec_lock;
58469+
58470+DEFINE_SPINLOCK(grsec_alert_lock);
58471+unsigned long grsec_alert_wtime = 0;
58472+unsigned long grsec_alert_fyet = 0;
58473+
58474+DEFINE_SPINLOCK(grsec_audit_lock);
58475+
58476+DEFINE_RWLOCK(grsec_exec_file_lock);
58477+
58478+char *gr_shared_page[4];
58479+
58480+char *gr_alert_log_fmt;
58481+char *gr_audit_log_fmt;
58482+char *gr_alert_log_buf;
58483+char *gr_audit_log_buf;
58484+
58485+extern struct gr_arg *gr_usermode;
58486+extern unsigned char *gr_system_salt;
58487+extern unsigned char *gr_system_sum;
58488+
58489+void __init
58490+grsecurity_init(void)
58491+{
58492+ int j;
58493+ /* create the per-cpu shared pages */
58494+
58495+#ifdef CONFIG_X86
58496+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58497+#endif
58498+
58499+ for (j = 0; j < 4; j++) {
58500+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58501+ if (gr_shared_page[j] == NULL) {
58502+ panic("Unable to allocate grsecurity shared page");
58503+ return;
58504+ }
58505+ }
58506+
58507+ /* allocate log buffers */
58508+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58509+ if (!gr_alert_log_fmt) {
58510+ panic("Unable to allocate grsecurity alert log format buffer");
58511+ return;
58512+ }
58513+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58514+ if (!gr_audit_log_fmt) {
58515+ panic("Unable to allocate grsecurity audit log format buffer");
58516+ return;
58517+ }
58518+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58519+ if (!gr_alert_log_buf) {
58520+ panic("Unable to allocate grsecurity alert log buffer");
58521+ return;
58522+ }
58523+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58524+ if (!gr_audit_log_buf) {
58525+ panic("Unable to allocate grsecurity audit log buffer");
58526+ return;
58527+ }
58528+
58529+ /* allocate memory for authentication structure */
58530+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58531+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58532+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58533+
58534+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58535+ panic("Unable to allocate grsecurity authentication structure");
58536+ return;
58537+ }
58538+
58539+
58540+#ifdef CONFIG_GRKERNSEC_IO
58541+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58542+ grsec_disable_privio = 1;
58543+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58544+ grsec_disable_privio = 1;
58545+#else
58546+ grsec_disable_privio = 0;
58547+#endif
58548+#endif
58549+
58550+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58551+ /* for backward compatibility, tpe_invert always defaults to on if
58552+ enabled in the kernel
58553+ */
58554+ grsec_enable_tpe_invert = 1;
58555+#endif
58556+
58557+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58558+#ifndef CONFIG_GRKERNSEC_SYSCTL
58559+ grsec_lock = 1;
58560+#endif
df50ba0c 58561+
58562+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58563+ grsec_enable_audit_textrel = 1;
58564+#endif
58565+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58566+ grsec_enable_log_rwxmaps = 1;
58567+#endif
58568+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58569+ grsec_enable_group = 1;
58570+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58571+#endif
58572+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58573+ grsec_enable_ptrace_readexec = 1;
58574+#endif
58575+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58576+ grsec_enable_chdir = 1;
58577+#endif
58578+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58579+ grsec_enable_harden_ptrace = 1;
58580+#endif
58581+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58582+ grsec_enable_mount = 1;
58583+#endif
58584+#ifdef CONFIG_GRKERNSEC_LINK
58585+ grsec_enable_link = 1;
58586+#endif
58587+#ifdef CONFIG_GRKERNSEC_BRUTE
58588+ grsec_enable_brute = 1;
58589+#endif
58590+#ifdef CONFIG_GRKERNSEC_DMESG
58591+ grsec_enable_dmesg = 1;
58592+#endif
58593+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58594+ grsec_enable_blackhole = 1;
58595+ grsec_lastack_retries = 4;
58596+#endif
58597+#ifdef CONFIG_GRKERNSEC_FIFO
58598+ grsec_enable_fifo = 1;
58599+#endif
58600+#ifdef CONFIG_GRKERNSEC_EXECLOG
58601+ grsec_enable_execlog = 1;
58602+#endif
58603+#ifdef CONFIG_GRKERNSEC_SETXID
58604+ grsec_enable_setxid = 1;
58605+#endif
58606+#ifdef CONFIG_GRKERNSEC_SIGNAL
58607+ grsec_enable_signal = 1;
58608+#endif
58609+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58610+ grsec_enable_forkfail = 1;
58611+#endif
58612+#ifdef CONFIG_GRKERNSEC_TIME
58613+ grsec_enable_time = 1;
58614+#endif
58615+#ifdef CONFIG_GRKERNSEC_RESLOG
58616+ grsec_resource_logging = 1;
58617+#endif
58618+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58619+ grsec_enable_chroot_findtask = 1;
58620+#endif
58621+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58622+ grsec_enable_chroot_unix = 1;
58623+#endif
58624+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58625+ grsec_enable_chroot_mount = 1;
58626+#endif
58627+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58628+ grsec_enable_chroot_fchdir = 1;
58629+#endif
58630+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58631+ grsec_enable_chroot_shmat = 1;
58632+#endif
58633+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58634+ grsec_enable_audit_ptrace = 1;
58635+#endif
58636+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58637+ grsec_enable_chroot_double = 1;
58638+#endif
58639+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58640+ grsec_enable_chroot_pivot = 1;
58641+#endif
58642+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58643+ grsec_enable_chroot_chdir = 1;
58644+#endif
58645+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58646+ grsec_enable_chroot_chmod = 1;
58647+#endif
58648+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58649+ grsec_enable_chroot_mknod = 1;
58650+#endif
58651+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58652+ grsec_enable_chroot_nice = 1;
58653+#endif
58654+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58655+ grsec_enable_chroot_execlog = 1;
58656+#endif
58657+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58658+ grsec_enable_chroot_caps = 1;
58659+#endif
58660+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58661+ grsec_enable_chroot_sysctl = 1;
58662+#endif
58663+#ifdef CONFIG_GRKERNSEC_TPE
58664+ grsec_enable_tpe = 1;
58665+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58666+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58667+ grsec_enable_tpe_all = 1;
58668+#endif
58669+#endif
58670+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58671+ grsec_enable_socket_all = 1;
58672+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58673+#endif
58674+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58675+ grsec_enable_socket_client = 1;
58676+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58677+#endif
58678+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58679+ grsec_enable_socket_server = 1;
58680+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58681+#endif
58682+#endif
58683+
58684+ return;
58685+}
58686diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58687new file mode 100644
58688index 0000000..3efe141
58689--- /dev/null
58690+++ b/grsecurity/grsec_link.c
58691@@ -0,0 +1,43 @@
58692+#include <linux/kernel.h>
58693+#include <linux/sched.h>
58694+#include <linux/fs.h>
58695+#include <linux/file.h>
58696+#include <linux/grinternal.h>
58697+
58698+int
58699+gr_handle_follow_link(const struct inode *parent,
58700+ const struct inode *inode,
58701+ const struct dentry *dentry, const struct vfsmount *mnt)
58702+{
58703+#ifdef CONFIG_GRKERNSEC_LINK
58704+ const struct cred *cred = current_cred();
58705+
58706+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58707+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
58708+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
58709+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58710+ return -EACCES;
58711+ }
58712+#endif
58713+ return 0;
58714+}
58715+
58716+int
58717+gr_handle_hardlink(const struct dentry *dentry,
58718+ const struct vfsmount *mnt,
58719+ struct inode *inode, const int mode, const char *to)
58720+{
58721+#ifdef CONFIG_GRKERNSEC_LINK
58722+ const struct cred *cred = current_cred();
58723+
58724+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
58725+ (!S_ISREG(mode) || (mode & S_ISUID) ||
58726+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
16454cff 58727+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58c5fc13
MT
58728+ !capable(CAP_FOWNER) && cred->uid) {
58729+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58730+ return -EPERM;
58731+ }
58732+#endif
58733+ return 0;
58734+}
58735diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58736new file mode 100644
58737index 0000000..a45d2e9
58738--- /dev/null
58739+++ b/grsecurity/grsec_log.c
6e9df6a3 58740@@ -0,0 +1,322 @@
58741+#include <linux/kernel.h>
58742+#include <linux/sched.h>
58743+#include <linux/file.h>
58744+#include <linux/tty.h>
58745+#include <linux/fs.h>
58746+#include <linux/grinternal.h>
58747+
58748+#ifdef CONFIG_TREE_PREEMPT_RCU
58749+#define DISABLE_PREEMPT() preempt_disable()
58750+#define ENABLE_PREEMPT() preempt_enable()
58751+#else
58752+#define DISABLE_PREEMPT()
58753+#define ENABLE_PREEMPT()
58754+#endif
58755+
58c5fc13 58756+#define BEGIN_LOCKS(x) \
df50ba0c 58757+ DISABLE_PREEMPT(); \
ae4e228f 58758+ rcu_read_lock(); \
58759+ read_lock(&tasklist_lock); \
58760+ read_lock(&grsec_exec_file_lock); \
58761+ if (x != GR_DO_AUDIT) \
58762+ spin_lock(&grsec_alert_lock); \
58763+ else \
58764+ spin_lock(&grsec_audit_lock)
58765+
58766+#define END_LOCKS(x) \
58767+ if (x != GR_DO_AUDIT) \
58768+ spin_unlock(&grsec_alert_lock); \
58769+ else \
58770+ spin_unlock(&grsec_audit_lock); \
58771+ read_unlock(&grsec_exec_file_lock); \
58772+ read_unlock(&tasklist_lock); \
ae4e228f 58773+ rcu_read_unlock(); \
df50ba0c 58774+ ENABLE_PREEMPT(); \
58775+ if (x == GR_DONT_AUDIT) \
58776+ gr_handle_alertkill(current)
58777+
58778+enum {
58779+ FLOODING,
58780+ NO_FLOODING
58781+};
58782+
58783+extern char *gr_alert_log_fmt;
58784+extern char *gr_audit_log_fmt;
58785+extern char *gr_alert_log_buf;
58786+extern char *gr_audit_log_buf;
58787+
58788+static int gr_log_start(int audit)
58789+{
58790+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58791+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58792+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58793+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58794+ unsigned long curr_secs = get_seconds();
58795+
58796+ if (audit == GR_DO_AUDIT)
58797+ goto set_fmt;
58798+
58799+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58800+ grsec_alert_wtime = curr_secs;
58c5fc13 58801+ grsec_alert_fyet = 0;
58802+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58803+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58804+ grsec_alert_fyet++;
58805+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
15a11c5b 58806+ grsec_alert_wtime = curr_secs;
58807+ grsec_alert_fyet++;
58808+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58809+ return FLOODING;
58810+ }
58811+ else return FLOODING;
58812+
58813+set_fmt:
15a11c5b 58814+#endif
58815+ memset(buf, 0, PAGE_SIZE);
58816+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
58817+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58818+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58c5fc13 58819+ } else if (current->signal->curr_ip) {
58820+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58821+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58822+ } else if (gr_acl_is_enabled()) {
58823+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58824+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58825+ } else {
58826+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
58827+ strcpy(buf, fmt);
58828+ }
58829+
58830+ return NO_FLOODING;
58831+}
58832+
58833+static void gr_log_middle(int audit, const char *msg, va_list ap)
58834+ __attribute__ ((format (printf, 2, 0)));
58835+
58836+static void gr_log_middle(int audit, const char *msg, va_list ap)
58837+{
58838+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58839+ unsigned int len = strlen(buf);
58840+
58841+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58842+
58843+ return;
58844+}
58845+
58846+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58847+ __attribute__ ((format (printf, 2, 3)));
58848+
58849+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58850+{
58851+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58852+ unsigned int len = strlen(buf);
58853+ va_list ap;
58854+
58855+ va_start(ap, msg);
58856+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58857+ va_end(ap);
58858+
58859+ return;
58860+}
58861+
6e9df6a3 58862+static void gr_log_end(int audit, int append_default)
58c5fc13
MT
58863+{
58864+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58c5fc13 58865+
6e9df6a3
MT
58866+ if (append_default) {
58867+ unsigned int len = strlen(buf);
58868+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58869+ }
58870+
58c5fc13
MT
58871+ printk("%s\n", buf);
58872+
58873+ return;
58874+}
58875+
58876+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58877+{
58878+ int logtype;
58879+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
66a7e928
MT
58880+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58881+ void *voidptr = NULL;
58882+ int num1 = 0, num2 = 0;
58883+ unsigned long ulong1 = 0, ulong2 = 0;
58884+ struct dentry *dentry = NULL;
58885+ struct vfsmount *mnt = NULL;
58886+ struct file *file = NULL;
58887+ struct task_struct *task = NULL;
58c5fc13
MT
58888+ const struct cred *cred, *pcred;
58889+ va_list ap;
58890+
58891+ BEGIN_LOCKS(audit);
58892+ logtype = gr_log_start(audit);
58893+ if (logtype == FLOODING) {
58894+ END_LOCKS(audit);
58895+ return;
58896+ }
58897+ va_start(ap, argtypes);
58898+ switch (argtypes) {
58899+ case GR_TTYSNIFF:
58900+ task = va_arg(ap, struct task_struct *);
6892158b 58901+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58c5fc13
MT
58902+ break;
58903+ case GR_SYSCTL_HIDDEN:
58904+ str1 = va_arg(ap, char *);
58905+ gr_log_middle_varargs(audit, msg, result, str1);
58906+ break;
58907+ case GR_RBAC:
58908+ dentry = va_arg(ap, struct dentry *);
58909+ mnt = va_arg(ap, struct vfsmount *);
58910+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58911+ break;
58912+ case GR_RBAC_STR:
58913+ dentry = va_arg(ap, struct dentry *);
58914+ mnt = va_arg(ap, struct vfsmount *);
58915+ str1 = va_arg(ap, char *);
58916+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58917+ break;
58918+ case GR_STR_RBAC:
58919+ str1 = va_arg(ap, char *);
58920+ dentry = va_arg(ap, struct dentry *);
58921+ mnt = va_arg(ap, struct vfsmount *);
58922+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58923+ break;
58924+ case GR_RBAC_MODE2:
58925+ dentry = va_arg(ap, struct dentry *);
58926+ mnt = va_arg(ap, struct vfsmount *);
58927+ str1 = va_arg(ap, char *);
58928+ str2 = va_arg(ap, char *);
58929+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58930+ break;
58931+ case GR_RBAC_MODE3:
58932+ dentry = va_arg(ap, struct dentry *);
58933+ mnt = va_arg(ap, struct vfsmount *);
58934+ str1 = va_arg(ap, char *);
58935+ str2 = va_arg(ap, char *);
58936+ str3 = va_arg(ap, char *);
58937+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58938+ break;
58939+ case GR_FILENAME:
58940+ dentry = va_arg(ap, struct dentry *);
58941+ mnt = va_arg(ap, struct vfsmount *);
58942+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58943+ break;
58944+ case GR_STR_FILENAME:
58945+ str1 = va_arg(ap, char *);
58946+ dentry = va_arg(ap, struct dentry *);
58947+ mnt = va_arg(ap, struct vfsmount *);
58948+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58949+ break;
58950+ case GR_FILENAME_STR:
58951+ dentry = va_arg(ap, struct dentry *);
58952+ mnt = va_arg(ap, struct vfsmount *);
58953+ str1 = va_arg(ap, char *);
58954+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58955+ break;
58956+ case GR_FILENAME_TWO_INT:
58957+ dentry = va_arg(ap, struct dentry *);
58958+ mnt = va_arg(ap, struct vfsmount *);
58959+ num1 = va_arg(ap, int);
58960+ num2 = va_arg(ap, int);
58961+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58962+ break;
58963+ case GR_FILENAME_TWO_INT_STR:
58964+ dentry = va_arg(ap, struct dentry *);
58965+ mnt = va_arg(ap, struct vfsmount *);
58966+ num1 = va_arg(ap, int);
58967+ num2 = va_arg(ap, int);
58968+ str1 = va_arg(ap, char *);
58969+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58970+ break;
58971+ case GR_TEXTREL:
58972+ file = va_arg(ap, struct file *);
58973+ ulong1 = va_arg(ap, unsigned long);
58974+ ulong2 = va_arg(ap, unsigned long);
58975+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58976+ break;
58977+ case GR_PTRACE:
58978+ task = va_arg(ap, struct task_struct *);
58979+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58980+ break;
58981+ case GR_RESOURCE:
58982+ task = va_arg(ap, struct task_struct *);
58983+ cred = __task_cred(task);
6892158b 58984+ pcred = __task_cred(task->real_parent);
58c5fc13
MT
58985+ ulong1 = va_arg(ap, unsigned long);
58986+ str1 = va_arg(ap, char *);
58987+ ulong2 = va_arg(ap, unsigned long);
6892158b 58988+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58989+ break;
58990+ case GR_CAP:
58991+ task = va_arg(ap, struct task_struct *);
58992+ cred = __task_cred(task);
6892158b 58993+ pcred = __task_cred(task->real_parent);
58c5fc13 58994+ str1 = va_arg(ap, char *);
6892158b 58995+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58996+ break;
58997+ case GR_SIG:
58998+ str1 = va_arg(ap, char *);
58999+ voidptr = va_arg(ap, void *);
59000+ gr_log_middle_varargs(audit, msg, str1, voidptr);
59001+ break;
59002+ case GR_SIG2:
59003+ task = va_arg(ap, struct task_struct *);
59004+ cred = __task_cred(task);
6892158b 59005+ pcred = __task_cred(task->real_parent);
58c5fc13 59006+ num1 = va_arg(ap, int);
6892158b 59007+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
59008+ break;
59009+ case GR_CRASH1:
59010+ task = va_arg(ap, struct task_struct *);
59011+ cred = __task_cred(task);
6892158b 59012+ pcred = __task_cred(task->real_parent);
58c5fc13 59013+ ulong1 = va_arg(ap, unsigned long);
6892158b 59014+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58c5fc13
MT
59015+ break;
59016+ case GR_CRASH2:
59017+ task = va_arg(ap, struct task_struct *);
59018+ cred = __task_cred(task);
6892158b 59019+ pcred = __task_cred(task->real_parent);
58c5fc13 59020+ ulong1 = va_arg(ap, unsigned long);
6892158b
MT
59021+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
59022+ break;
59023+ case GR_RWXMAP:
59024+ file = va_arg(ap, struct file *);
59025+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58c5fc13
MT
59026+ break;
59027+ case GR_PSACCT:
59028+ {
59029+ unsigned int wday, cday;
59030+ __u8 whr, chr;
59031+ __u8 wmin, cmin;
59032+ __u8 wsec, csec;
59033+ char cur_tty[64] = { 0 };
59034+ char parent_tty[64] = { 0 };
59035+
59036+ task = va_arg(ap, struct task_struct *);
59037+ wday = va_arg(ap, unsigned int);
59038+ cday = va_arg(ap, unsigned int);
59039+ whr = va_arg(ap, int);
59040+ chr = va_arg(ap, int);
59041+ wmin = va_arg(ap, int);
59042+ cmin = va_arg(ap, int);
59043+ wsec = va_arg(ap, int);
59044+ csec = va_arg(ap, int);
59045+ ulong1 = va_arg(ap, unsigned long);
59046+ cred = __task_cred(task);
6892158b 59047+ pcred = __task_cred(task->real_parent);
58c5fc13 59048+
6892158b 59049+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
59050+ }
59051+ break;
59052+ default:
59053+ gr_log_middle(audit, msg, ap);
59054+ }
59055+ va_end(ap);
6e9df6a3
MT
59056+ // these don't need DEFAULTSECARGS printed on the end
59057+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
59058+ gr_log_end(audit, 0);
59059+ else
59060+ gr_log_end(audit, 1);
58c5fc13
MT
59061+ END_LOCKS(audit);
59062+}
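The flood control at the top of gr_log_start() is a simple windowed burst limiter: the first alert opens a CONFIG_GRKERNSEC_FLOODTIME-second window, up to CONFIG_GRKERNSEC_FLOODBURST alerts pass inside it, the next one prints a single "logging disabled" notice (and restarts the window from that moment), and everything after that is dropped until the window expires; audit messages (GR_DO_AUDIT) skip the limiter entirely. Below is a minimal standalone sketch of the same idea in plain userspace C; the names, time(NULL) and the two constants are stand-ins for the patch's get_seconds() and CONFIG_GRKERNSEC_FLOOD* values, not its code.

/* Standalone sketch of the windowed burst limiter used by gr_log_start(). */
#include <stdio.h>
#include <time.h>

#define FLOOD_TIME  10   /* seconds per window (stand-in for CONFIG_GRKERNSEC_FLOODTIME) */
#define FLOOD_BURST  6   /* messages allowed per window (stand-in for CONFIG_GRKERNSEC_FLOODBURST) */

static time_t window_start;
static unsigned int sent_in_window;

/* Returns 1 if the caller may emit a log line, 0 if it is being suppressed. */
static int may_log(void)
{
    time_t now = time(NULL);

    if (!window_start || now > window_start + FLOOD_TIME) {
        /* new window: reset the counter and let this message through */
        window_start = now;
        sent_in_window = 0;
        return 1;
    }
    if (sent_in_window < FLOOD_BURST) {
        sent_in_window++;
        return 1;
    }
    if (sent_in_window == FLOOD_BURST) {
        /* announce the suppression exactly once per window */
        sent_in_window++;
        printf("more alerts, logging disabled for %d seconds\n", FLOOD_TIME);
    }
    return 0;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        printf("attempt %d: %s\n", i, may_log() ? "logged" : "suppressed");
    return 0;
}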
fe2de317
MT
59063diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
59064new file mode 100644
4c928ab7 59065index 0000000..f536303
fe2de317
MT
59066--- /dev/null
59067+++ b/grsecurity/grsec_mem.c
4c928ab7 59068@@ -0,0 +1,40 @@
58c5fc13
MT
59069+#include <linux/kernel.h>
59070+#include <linux/sched.h>
59071+#include <linux/mm.h>
59072+#include <linux/mman.h>
59073+#include <linux/grinternal.h>
59074+
59075+void
59076+gr_handle_ioperm(void)
59077+{
59078+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
59079+ return;
59080+}
59081+
59082+void
59083+gr_handle_iopl(void)
59084+{
59085+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
59086+ return;
59087+}
59088+
59089+void
71d190be 59090+gr_handle_mem_readwrite(u64 from, u64 to)
58c5fc13 59091+{
71d190be 59092+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58c5fc13
MT
59093+ return;
59094+}
59095+
59096+void
ae4e228f
MT
59097+gr_handle_vm86(void)
59098+{
59099+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
59100+ return;
59101+}
4c928ab7
MT
59102+
59103+void
59104+gr_log_badprocpid(const char *entry)
59105+{
59106+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
59107+ return;
59108+}
fe2de317
MT
59109diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
59110new file mode 100644
59111index 0000000..2131422
59112--- /dev/null
59113+++ b/grsecurity/grsec_mount.c
ae4e228f 59114@@ -0,0 +1,62 @@
58c5fc13
MT
59115+#include <linux/kernel.h>
59116+#include <linux/sched.h>
ae4e228f 59117+#include <linux/mount.h>
58c5fc13
MT
59118+#include <linux/grsecurity.h>
59119+#include <linux/grinternal.h>
59120+
59121+void
59122+gr_log_remount(const char *devname, const int retval)
59123+{
59124+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59125+ if (grsec_enable_mount && (retval >= 0))
59126+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
59127+#endif
59128+ return;
59129+}
59130+
59131+void
59132+gr_log_unmount(const char *devname, const int retval)
59133+{
59134+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59135+ if (grsec_enable_mount && (retval >= 0))
59136+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
59137+#endif
59138+ return;
59139+}
59140+
59141+void
59142+gr_log_mount(const char *from, const char *to, const int retval)
59143+{
59144+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59145+ if (grsec_enable_mount && (retval >= 0))
15a11c5b 59146+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58c5fc13
MT
59147+#endif
59148+ return;
59149+}
ae4e228f
MT
59150+
59151+int
59152+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
59153+{
59154+#ifdef CONFIG_GRKERNSEC_ROFS
59155+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
59156+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
59157+ return -EPERM;
59158+ } else
59159+ return 0;
59160+#endif
59161+ return 0;
59162+}
59163+
59164+int
59165+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
59166+{
59167+#ifdef CONFIG_GRKERNSEC_ROFS
59168+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
59169+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
59170+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
59171+ return -EPERM;
59172+ } else
59173+ return 0;
59174+#endif
59175+ return 0;
59176+}
fe2de317
MT
59177diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
59178new file mode 100644
59179index 0000000..a3b12a0
59180--- /dev/null
59181+++ b/grsecurity/grsec_pax.c
6892158b
MT
59182@@ -0,0 +1,36 @@
59183+#include <linux/kernel.h>
59184+#include <linux/sched.h>
59185+#include <linux/mm.h>
59186+#include <linux/file.h>
59187+#include <linux/grinternal.h>
59188+#include <linux/grsecurity.h>
59189+
59190+void
59191+gr_log_textrel(struct vm_area_struct * vma)
59192+{
59193+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59194+ if (grsec_enable_audit_textrel)
59195+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
59196+#endif
59197+ return;
59198+}
59199+
59200+void
59201+gr_log_rwxmmap(struct file *file)
59202+{
59203+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59204+ if (grsec_enable_log_rwxmaps)
59205+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
59206+#endif
59207+ return;
59208+}
59209+
59210+void
59211+gr_log_rwxmprotect(struct file *file)
59212+{
59213+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59214+ if (grsec_enable_log_rwxmaps)
59215+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
59216+#endif
59217+ return;
59218+}
fe2de317
MT
59219diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
59220new file mode 100644
4c928ab7 59221index 0000000..f7f29aa
fe2de317
MT
59222--- /dev/null
59223+++ b/grsecurity/grsec_ptrace.c
4c928ab7 59224@@ -0,0 +1,30 @@
ae4e228f
MT
59225+#include <linux/kernel.h>
59226+#include <linux/sched.h>
59227+#include <linux/grinternal.h>
4c928ab7 59228+#include <linux/security.h>
ae4e228f
MT
59229+
59230+void
59231+gr_audit_ptrace(struct task_struct *task)
59232+{
59233+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59234+ if (grsec_enable_audit_ptrace)
59235+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
59236+#endif
59237+ return;
59238+}
4c928ab7
MT
59239+
59240+int
59241+gr_ptrace_readexec(struct file *file, int unsafe_flags)
59242+{
59243+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59244+ const struct dentry *dentry = file->f_path.dentry;
59245+ const struct vfsmount *mnt = file->f_path.mnt;
59246+
59247+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
59248+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
59249+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
59250+ return -EACCES;
59251+ }
59252+#endif
59253+ return 0;
59254+}
fe2de317
MT
59255diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59256new file mode 100644
4c928ab7 59257index 0000000..7a5b2de
fe2de317
MT
59258--- /dev/null
59259+++ b/grsecurity/grsec_sig.c
4c928ab7 59260@@ -0,0 +1,207 @@
58c5fc13
MT
59261+#include <linux/kernel.h>
59262+#include <linux/sched.h>
59263+#include <linux/delay.h>
59264+#include <linux/grsecurity.h>
59265+#include <linux/grinternal.h>
71d190be 59266+#include <linux/hardirq.h>
58c5fc13
MT
59267+
59268+char *signames[] = {
59269+ [SIGSEGV] = "Segmentation fault",
59270+ [SIGILL] = "Illegal instruction",
59271+ [SIGABRT] = "Abort",
59272+ [SIGBUS] = "Invalid alignment/Bus error"
59273+};
59274+
59275+void
59276+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59277+{
59278+#ifdef CONFIG_GRKERNSEC_SIGNAL
59279+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59280+ (sig == SIGABRT) || (sig == SIGBUS))) {
59281+ if (t->pid == current->pid) {
59282+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59283+ } else {
59284+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59285+ }
59286+ }
59287+#endif
59288+ return;
59289+}
59290+
59291+int
59292+gr_handle_signal(const struct task_struct *p, const int sig)
59293+{
59294+#ifdef CONFIG_GRKERNSEC
4c928ab7
MT
59295+ /* ignore the 0 signal for protected task checks */
59296+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58c5fc13
MT
59297+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59298+ return -EPERM;
59299+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59300+ return -EPERM;
59301+ }
59302+#endif
59303+ return 0;
59304+}
59305+
71d190be
MT
59306+#ifdef CONFIG_GRKERNSEC
59307+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59308+
59309+int gr_fake_force_sig(int sig, struct task_struct *t)
59310+{
59311+ unsigned long int flags;
59312+ int ret, blocked, ignored;
59313+ struct k_sigaction *action;
59314+
59315+ spin_lock_irqsave(&t->sighand->siglock, flags);
59316+ action = &t->sighand->action[sig-1];
59317+ ignored = action->sa.sa_handler == SIG_IGN;
59318+ blocked = sigismember(&t->blocked, sig);
59319+ if (blocked || ignored) {
59320+ action->sa.sa_handler = SIG_DFL;
59321+ if (blocked) {
59322+ sigdelset(&t->blocked, sig);
59323+ recalc_sigpending_and_wake(t);
59324+ }
59325+ }
59326+ if (action->sa.sa_handler == SIG_DFL)
59327+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
59328+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59329+
59330+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
59331+
59332+ return ret;
59333+}
59334+#endif
59335+
59336+#ifdef CONFIG_GRKERNSEC_BRUTE
59337+#define GR_USER_BAN_TIME (15 * 60)
59338+
59339+static int __get_dumpable(unsigned long mm_flags)
59340+{
59341+ int ret;
59342+
59343+ ret = mm_flags & MMF_DUMPABLE_MASK;
59344+ return (ret >= 2) ? 2 : ret;
59345+}
59346+#endif
59347+
59348+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58c5fc13
MT
59349+{
59350+#ifdef CONFIG_GRKERNSEC_BRUTE
71d190be
MT
59351+ uid_t uid = 0;
59352+
15a11c5b
MT
59353+ if (!grsec_enable_brute)
59354+ return;
59355+
71d190be 59356+ rcu_read_lock();
58c5fc13
MT
59357+ read_lock(&tasklist_lock);
59358+ read_lock(&grsec_exec_file_lock);
6892158b
MT
59359+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59360+ p->real_parent->brute = 1;
71d190be
MT
59361+ else {
59362+ const struct cred *cred = __task_cred(p), *cred2;
59363+ struct task_struct *tsk, *tsk2;
59364+
59365+ if (!__get_dumpable(mm_flags) && cred->uid) {
59366+ struct user_struct *user;
59367+
59368+ uid = cred->uid;
59369+
59370+ /* this is put upon execution past expiration */
59371+ user = find_user(uid);
59372+ if (user == NULL)
59373+ goto unlock;
59374+ user->banned = 1;
59375+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59376+ if (user->ban_expires == ~0UL)
59377+ user->ban_expires--;
59378+
59379+ do_each_thread(tsk2, tsk) {
59380+ cred2 = __task_cred(tsk);
59381+ if (tsk != p && cred2->uid == uid)
59382+ gr_fake_force_sig(SIGKILL, tsk);
59383+ } while_each_thread(tsk2, tsk);
59384+ }
59385+ }
59386+unlock:
58c5fc13
MT
59387+ read_unlock(&grsec_exec_file_lock);
59388+ read_unlock(&tasklist_lock);
71d190be
MT
59389+ rcu_read_unlock();
59390+
59391+ if (uid)
59392+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
59393+
58c5fc13
MT
59394+#endif
59395+ return;
59396+}
59397+
59398+void gr_handle_brute_check(void)
59399+{
59400+#ifdef CONFIG_GRKERNSEC_BRUTE
59401+ if (current->brute)
59402+ msleep(30 * 1000);
59403+#endif
59404+ return;
59405+}
59406+
71d190be
MT
59407+void gr_handle_kernel_exploit(void)
59408+{
59409+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59410+ const struct cred *cred;
59411+ struct task_struct *tsk, *tsk2;
59412+ struct user_struct *user;
59413+ uid_t uid;
59414+
59415+ if (in_irq() || in_serving_softirq() || in_nmi())
59416+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59417+
59418+ uid = current_uid();
59419+
59420+ if (uid == 0)
59421+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
59422+ else {
59423+ /* kill all the processes of this user, hold a reference
59424+ to their creds struct, and prevent them from creating
59425+ another process until system reset
59426+ */
59427+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
59428+ /* we intentionally leak this ref */
59429+ user = get_uid(current->cred->user);
59430+ if (user) {
59431+ user->banned = 1;
59432+ user->ban_expires = ~0UL;
59433+ }
59434+
59435+ read_lock(&tasklist_lock);
59436+ do_each_thread(tsk2, tsk) {
59437+ cred = __task_cred(tsk);
59438+ if (cred->uid == uid)
59439+ gr_fake_force_sig(SIGKILL, tsk);
59440+ } while_each_thread(tsk2, tsk);
59441+ read_unlock(&tasklist_lock);
59442+ }
59443+#endif
59444+}
59445+
66a7e928 59446+int __gr_process_user_ban(struct user_struct *user)
71d190be
MT
59447+{
59448+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66a7e928 59449+ if (unlikely(user->banned)) {
71d190be
MT
59450+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59451+ user->banned = 0;
59452+ user->ban_expires = 0;
59453+ free_uid(user);
59454+ } else
59455+ return -EPERM;
59456+ }
59457+#endif
59458+ return 0;
66a7e928 59459+}
71d190be 59460+
66a7e928
MT
59461+int gr_process_user_ban(void)
59462+{
59463+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59464+ return __gr_process_user_ban(current->cred->user);
59465+#endif
59466+ return 0;
71d190be 59467+}
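To summarize the ban machinery above: gr_handle_brute_attach() bans a non-root uid whose crashing process was not dumpable for GR_USER_BAN_TIME (15 minutes), gr_handle_kernel_exploit() bans the uid permanently by setting ban_expires to ~0UL, and __gr_process_user_ban() is where a temporary ban is lifted again once get_seconds() passes ban_expires. A standalone sketch of that expiry check follows; the struct and field names are illustrative only, while the real code operates on struct user_struct and drops the leaked reference with free_uid().

/* Standalone sketch of the ban-expiry check in __gr_process_user_ban(). */
#include <stdio.h>
#include <time.h>

struct fake_user {
    int banned;
    unsigned long ban_expires;   /* 0 = unset, ~0UL = permanent ban */
};

/* Returns 0 if the user may create a process, -1 if still banned. */
static int process_user_ban(struct fake_user *user)
{
    if (user->banned) {
        if (user->ban_expires != ~0UL &&
            (unsigned long)time(NULL) >= user->ban_expires) {
            user->banned = 0;        /* temporary ban has run out */
            user->ban_expires = 0;
        } else {
            return -1;               /* permanent ban or still inside the window */
        }
    }
    return 0;
}

int main(void)
{
    struct fake_user u = { .banned = 1, .ban_expires = (unsigned long)time(NULL) - 1 };

    printf("expired ban   -> %d\n", process_user_ban(&u));  /* 0: cleared */
    u.banned = 1;
    u.ban_expires = ~0UL;
    printf("permanent ban -> %d\n", process_user_ban(&u));  /* -1 */
    return 0;
}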
fe2de317
MT
59468diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59469new file mode 100644
59470index 0000000..4030d57
59471--- /dev/null
59472+++ b/grsecurity/grsec_sock.c
66a7e928 59473@@ -0,0 +1,244 @@
58c5fc13
MT
59474+#include <linux/kernel.h>
59475+#include <linux/module.h>
59476+#include <linux/sched.h>
59477+#include <linux/file.h>
59478+#include <linux/net.h>
59479+#include <linux/in.h>
59480+#include <linux/ip.h>
59481+#include <net/sock.h>
59482+#include <net/inet_sock.h>
59483+#include <linux/grsecurity.h>
59484+#include <linux/grinternal.h>
59485+#include <linux/gracl.h>
59486+
58c5fc13
MT
59487+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59488+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59489+
59490+EXPORT_SYMBOL(gr_search_udp_recvmsg);
59491+EXPORT_SYMBOL(gr_search_udp_sendmsg);
59492+
59493+#ifdef CONFIG_UNIX_MODULE
59494+EXPORT_SYMBOL(gr_acl_handle_unix);
59495+EXPORT_SYMBOL(gr_acl_handle_mknod);
59496+EXPORT_SYMBOL(gr_handle_chroot_unix);
59497+EXPORT_SYMBOL(gr_handle_create);
59498+#endif
59499+
59500+#ifdef CONFIG_GRKERNSEC
59501+#define gr_conn_table_size 32749
59502+struct conn_table_entry {
59503+ struct conn_table_entry *next;
59504+ struct signal_struct *sig;
59505+};
59506+
59507+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59508+DEFINE_SPINLOCK(gr_conn_table_lock);
59509+
59510+extern const char * gr_socktype_to_name(unsigned char type);
59511+extern const char * gr_proto_to_name(unsigned char proto);
bc901d79 59512+extern const char * gr_sockfamily_to_name(unsigned char family);
58c5fc13
MT
59513+
59514+static __inline__ int
59515+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59516+{
59517+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59518+}
59519+
59520+static __inline__ int
59521+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59522+ __u16 sport, __u16 dport)
59523+{
59524+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59525+ sig->gr_sport == sport && sig->gr_dport == dport))
59526+ return 1;
59527+ else
59528+ return 0;
59529+}
59530+
59531+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59532+{
59533+ struct conn_table_entry **match;
59534+ unsigned int index;
59535+
59536+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59537+ sig->gr_sport, sig->gr_dport,
59538+ gr_conn_table_size);
59539+
59540+ newent->sig = sig;
59541+
59542+ match = &gr_conn_table[index];
59543+ newent->next = *match;
59544+ *match = newent;
59545+
59546+ return;
59547+}
59548+
59549+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59550+{
59551+ struct conn_table_entry *match, *last = NULL;
59552+ unsigned int index;
59553+
59554+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59555+ sig->gr_sport, sig->gr_dport,
59556+ gr_conn_table_size);
59557+
59558+ match = gr_conn_table[index];
59559+ while (match && !conn_match(match->sig,
59560+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59561+ sig->gr_dport)) {
59562+ last = match;
59563+ match = match->next;
59564+ }
59565+
59566+ if (match) {
59567+ if (last)
59568+ last->next = match->next;
59569+ else
59570+ gr_conn_table[index] = NULL;
59571+ kfree(match);
59572+ }
59573+
59574+ return;
59575+}
59576+
59577+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59578+ __u16 sport, __u16 dport)
59579+{
59580+ struct conn_table_entry *match;
59581+ unsigned int index;
59582+
59583+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59584+
59585+ match = gr_conn_table[index];
59586+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59587+ match = match->next;
59588+
59589+ if (match)
59590+ return match->sig;
59591+ else
59592+ return NULL;
59593+}
59594+
59595+#endif
59596+
59597+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59598+{
59599+#ifdef CONFIG_GRKERNSEC
59600+ struct signal_struct *sig = task->signal;
59601+ struct conn_table_entry *newent;
59602+
59603+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59604+ if (newent == NULL)
59605+ return;
59606+ /* no bh lock needed since we are called with bh disabled */
59607+ spin_lock(&gr_conn_table_lock);
59608+ gr_del_task_from_ip_table_nolock(sig);
ae4e228f
MT
59609+ sig->gr_saddr = inet->inet_rcv_saddr;
59610+ sig->gr_daddr = inet->inet_daddr;
59611+ sig->gr_sport = inet->inet_sport;
59612+ sig->gr_dport = inet->inet_dport;
58c5fc13
MT
59613+ gr_add_to_task_ip_table_nolock(sig, newent);
59614+ spin_unlock(&gr_conn_table_lock);
59615+#endif
59616+ return;
59617+}
59618+
59619+void gr_del_task_from_ip_table(struct task_struct *task)
59620+{
59621+#ifdef CONFIG_GRKERNSEC
59622+ spin_lock_bh(&gr_conn_table_lock);
59623+ gr_del_task_from_ip_table_nolock(task->signal);
59624+ spin_unlock_bh(&gr_conn_table_lock);
59625+#endif
59626+ return;
59627+}
59628+
59629+void
59630+gr_attach_curr_ip(const struct sock *sk)
59631+{
59632+#ifdef CONFIG_GRKERNSEC
59633+ struct signal_struct *p, *set;
59634+ const struct inet_sock *inet = inet_sk(sk);
59635+
59636+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59637+ return;
59638+
59639+ set = current->signal;
59640+
59641+ spin_lock_bh(&gr_conn_table_lock);
ae4e228f
MT
59642+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59643+ inet->inet_dport, inet->inet_sport);
58c5fc13
MT
59644+ if (unlikely(p != NULL)) {
59645+ set->curr_ip = p->curr_ip;
59646+ set->used_accept = 1;
59647+ gr_del_task_from_ip_table_nolock(p);
59648+ spin_unlock_bh(&gr_conn_table_lock);
59649+ return;
59650+ }
59651+ spin_unlock_bh(&gr_conn_table_lock);
59652+
ae4e228f 59653+ set->curr_ip = inet->inet_daddr;
58c5fc13
MT
59654+ set->used_accept = 1;
59655+#endif
59656+ return;
59657+}
59658+
59659+int
59660+gr_handle_sock_all(const int family, const int type, const int protocol)
59661+{
59662+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59663+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
bc901d79
MT
59664+ (family != AF_UNIX)) {
59665+ if (family == AF_INET)
59666+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59667+ else
59668+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58c5fc13
MT
59669+ return -EACCES;
59670+ }
59671+#endif
59672+ return 0;
59673+}
59674+
59675+int
59676+gr_handle_sock_server(const struct sockaddr *sck)
59677+{
59678+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59679+ if (grsec_enable_socket_server &&
59680+ in_group_p(grsec_socket_server_gid) &&
59681+ sck && (sck->sa_family != AF_UNIX) &&
59682+ (sck->sa_family != AF_LOCAL)) {
59683+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59684+ return -EACCES;
59685+ }
59686+#endif
59687+ return 0;
59688+}
59689+
59690+int
59691+gr_handle_sock_server_other(const struct sock *sck)
59692+{
59693+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59694+ if (grsec_enable_socket_server &&
59695+ in_group_p(grsec_socket_server_gid) &&
59696+ sck && (sck->sk_family != AF_UNIX) &&
59697+ (sck->sk_family != AF_LOCAL)) {
59698+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59699+ return -EACCES;
59700+ }
59701+#endif
59702+ return 0;
59703+}
59704+
59705+int
59706+gr_handle_sock_client(const struct sockaddr *sck)
59707+{
59708+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59709+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59710+ sck && (sck->sa_family != AF_UNIX) &&
59711+ (sck->sa_family != AF_LOCAL)) {
59712+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59713+ return -EACCES;
59714+ }
59715+#endif
59716+ return 0;
59717+}
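The connection table above is a fixed array of 32749 singly linked chains keyed by the (saddr, daddr, sport, dport) tuple: gr_update_task_in_ip_table() files the task's signal_struct under the tuple of its socket, and gr_attach_curr_ip() builds its lookup key from the accepted socket with the local and remote fields exchanged, so the key matches the entry the connecting task stored, then copies that entry's curr_ip into the current signal_struct. A minimal userspace sketch of the same chained table (insert and lookup only; sizes and field names are illustrative, and a pid stands in for the signal_struct pointer):

/* Standalone sketch of the 4-tuple connection hash table in grsec_sock.c. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 31   /* the patch uses 32749; any reasonable size works */

struct entry {
    struct entry *next;
    uint32_t saddr, daddr;
    uint16_t sport, dport;
    int pid;            /* stand-in for the signal_struct pointer */
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
    /* same mixing as the patch: daddr + saddr + (sport << 8) + (dport << 16) */
    return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t saddr, uint32_t daddr,
                     uint16_t sport, uint16_t dport, int pid)
{
    unsigned int i = conn_hash(saddr, daddr, sport, dport);
    struct entry *e = malloc(sizeof(*e));

    if (!e)
        return;
    e->saddr = saddr; e->daddr = daddr;
    e->sport = sport; e->dport = dport;
    e->pid = pid;
    e->next = table[i];          /* push onto the head of the chain */
    table[i] = e;
}

static struct entry *conn_lookup(uint32_t saddr, uint32_t daddr,
                                 uint16_t sport, uint16_t dport)
{
    struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

    while (e && !(e->saddr == saddr && e->daddr == daddr &&
                  e->sport == sport && e->dport == dport))
        e = e->next;
    return e;
}

int main(void)
{
    conn_add(0x0a000001, 0x0a000002, 1234, 80, 42);
    struct entry *e = conn_lookup(0x0a000001, 0x0a000002, 1234, 80);

    printf("lookup: %s (pid %d)\n", e ? "hit" : "miss", e ? e->pid : -1);
    return 0;
}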
fe2de317
MT
59718diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59719new file mode 100644
4c928ab7 59720index 0000000..a1aedd7
fe2de317
MT
59721--- /dev/null
59722+++ b/grsecurity/grsec_sysctl.c
4c928ab7 59723@@ -0,0 +1,451 @@
58c5fc13
MT
59724+#include <linux/kernel.h>
59725+#include <linux/sched.h>
59726+#include <linux/sysctl.h>
59727+#include <linux/grsecurity.h>
59728+#include <linux/grinternal.h>
59729+
59730+int
59731+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59732+{
59733+#ifdef CONFIG_GRKERNSEC_SYSCTL
59734+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59735+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59736+ return -EACCES;
59737+ }
59738+#endif
59739+ return 0;
59740+}
59741+
ae4e228f
MT
59742+#ifdef CONFIG_GRKERNSEC_ROFS
59743+static int __maybe_unused one = 1;
59744+#endif
59745+
59746+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57199397 59747+struct ctl_table grsecurity_table[] = {
58c5fc13 59748+#ifdef CONFIG_GRKERNSEC_SYSCTL
df50ba0c
MT
59749+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59750+#ifdef CONFIG_GRKERNSEC_IO
59751+ {
59752+ .procname = "disable_priv_io",
59753+ .data = &grsec_disable_privio,
59754+ .maxlen = sizeof(int),
59755+ .mode = 0600,
59756+ .proc_handler = &proc_dointvec,
59757+ },
59758+#endif
59759+#endif
58c5fc13
MT
59760+#ifdef CONFIG_GRKERNSEC_LINK
59761+ {
58c5fc13
MT
59762+ .procname = "linking_restrictions",
59763+ .data = &grsec_enable_link,
59764+ .maxlen = sizeof(int),
59765+ .mode = 0600,
59766+ .proc_handler = &proc_dointvec,
59767+ },
59768+#endif
15a11c5b 59769+#ifdef CONFIG_GRKERNSEC_BRUTE
58c5fc13 59770+ {
15a11c5b
MT
59771+ .procname = "deter_bruteforce",
59772+ .data = &grsec_enable_brute,
58c5fc13
MT
59773+ .maxlen = sizeof(int),
59774+ .mode = 0600,
59775+ .proc_handler = &proc_dointvec,
59776+ },
59777+#endif
15a11c5b 59778+#ifdef CONFIG_GRKERNSEC_FIFO
58c5fc13 59779+ {
15a11c5b
MT
59780+ .procname = "fifo_restrictions",
59781+ .data = &grsec_enable_fifo,
58c5fc13
MT
59782+ .maxlen = sizeof(int),
59783+ .mode = 0600,
59784+ .proc_handler = &proc_dointvec,
59785+ },
59786+#endif
4c928ab7
MT
59787+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59788+ {
59789+ .procname = "ptrace_readexec",
59790+ .data = &grsec_enable_ptrace_readexec,
59791+ .maxlen = sizeof(int),
59792+ .mode = 0600,
59793+ .proc_handler = &proc_dointvec,
59794+ },
59795+#endif
59796+#ifdef CONFIG_GRKERNSEC_SETXID
59797+ {
59798+ .procname = "consistent_setxid",
59799+ .data = &grsec_enable_setxid,
59800+ .maxlen = sizeof(int),
59801+ .mode = 0600,
59802+ .proc_handler = &proc_dointvec,
59803+ },
59804+#endif
ae4e228f
MT
59805+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59806+ {
59807+ .procname = "ip_blackhole",
59808+ .data = &grsec_enable_blackhole,
59809+ .maxlen = sizeof(int),
59810+ .mode = 0600,
59811+ .proc_handler = &proc_dointvec,
59812+ },
59813+ {
59814+ .procname = "lastack_retries",
59815+ .data = &grsec_lastack_retries,
59816+ .maxlen = sizeof(int),
59817+ .mode = 0600,
59818+ .proc_handler = &proc_dointvec,
59819+ },
59820+#endif
58c5fc13
MT
59821+#ifdef CONFIG_GRKERNSEC_EXECLOG
59822+ {
58c5fc13
MT
59823+ .procname = "exec_logging",
59824+ .data = &grsec_enable_execlog,
59825+ .maxlen = sizeof(int),
59826+ .mode = 0600,
59827+ .proc_handler = &proc_dointvec,
59828+ },
59829+#endif
6892158b
MT
59830+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59831+ {
59832+ .procname = "rwxmap_logging",
59833+ .data = &grsec_enable_log_rwxmaps,
59834+ .maxlen = sizeof(int),
59835+ .mode = 0600,
59836+ .proc_handler = &proc_dointvec,
59837+ },
59838+#endif
58c5fc13
MT
59839+#ifdef CONFIG_GRKERNSEC_SIGNAL
59840+ {
58c5fc13
MT
59841+ .procname = "signal_logging",
59842+ .data = &grsec_enable_signal,
59843+ .maxlen = sizeof(int),
59844+ .mode = 0600,
59845+ .proc_handler = &proc_dointvec,
59846+ },
59847+#endif
59848+#ifdef CONFIG_GRKERNSEC_FORKFAIL
59849+ {
58c5fc13
MT
59850+ .procname = "forkfail_logging",
59851+ .data = &grsec_enable_forkfail,
59852+ .maxlen = sizeof(int),
59853+ .mode = 0600,
59854+ .proc_handler = &proc_dointvec,
59855+ },
59856+#endif
59857+#ifdef CONFIG_GRKERNSEC_TIME
59858+ {
58c5fc13
MT
59859+ .procname = "timechange_logging",
59860+ .data = &grsec_enable_time,
59861+ .maxlen = sizeof(int),
59862+ .mode = 0600,
59863+ .proc_handler = &proc_dointvec,
59864+ },
59865+#endif
59866+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59867+ {
58c5fc13
MT
59868+ .procname = "chroot_deny_shmat",
59869+ .data = &grsec_enable_chroot_shmat,
59870+ .maxlen = sizeof(int),
59871+ .mode = 0600,
59872+ .proc_handler = &proc_dointvec,
59873+ },
59874+#endif
59875+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59876+ {
58c5fc13
MT
59877+ .procname = "chroot_deny_unix",
59878+ .data = &grsec_enable_chroot_unix,
59879+ .maxlen = sizeof(int),
59880+ .mode = 0600,
59881+ .proc_handler = &proc_dointvec,
59882+ },
59883+#endif
59884+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59885+ {
58c5fc13
MT
59886+ .procname = "chroot_deny_mount",
59887+ .data = &grsec_enable_chroot_mount,
59888+ .maxlen = sizeof(int),
59889+ .mode = 0600,
59890+ .proc_handler = &proc_dointvec,
59891+ },
59892+#endif
59893+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59894+ {
58c5fc13
MT
59895+ .procname = "chroot_deny_fchdir",
59896+ .data = &grsec_enable_chroot_fchdir,
59897+ .maxlen = sizeof(int),
59898+ .mode = 0600,
59899+ .proc_handler = &proc_dointvec,
59900+ },
59901+#endif
59902+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59903+ {
58c5fc13
MT
59904+ .procname = "chroot_deny_chroot",
59905+ .data = &grsec_enable_chroot_double,
59906+ .maxlen = sizeof(int),
59907+ .mode = 0600,
59908+ .proc_handler = &proc_dointvec,
59909+ },
59910+#endif
59911+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59912+ {
58c5fc13
MT
59913+ .procname = "chroot_deny_pivot",
59914+ .data = &grsec_enable_chroot_pivot,
59915+ .maxlen = sizeof(int),
59916+ .mode = 0600,
59917+ .proc_handler = &proc_dointvec,
59918+ },
59919+#endif
59920+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59921+ {
58c5fc13
MT
59922+ .procname = "chroot_enforce_chdir",
59923+ .data = &grsec_enable_chroot_chdir,
59924+ .maxlen = sizeof(int),
59925+ .mode = 0600,
59926+ .proc_handler = &proc_dointvec,
59927+ },
59928+#endif
59929+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59930+ {
58c5fc13
MT
59931+ .procname = "chroot_deny_chmod",
59932+ .data = &grsec_enable_chroot_chmod,
59933+ .maxlen = sizeof(int),
59934+ .mode = 0600,
59935+ .proc_handler = &proc_dointvec,
59936+ },
59937+#endif
59938+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59939+ {
58c5fc13
MT
59940+ .procname = "chroot_deny_mknod",
59941+ .data = &grsec_enable_chroot_mknod,
59942+ .maxlen = sizeof(int),
59943+ .mode = 0600,
59944+ .proc_handler = &proc_dointvec,
59945+ },
59946+#endif
59947+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59948+ {
58c5fc13
MT
59949+ .procname = "chroot_restrict_nice",
59950+ .data = &grsec_enable_chroot_nice,
59951+ .maxlen = sizeof(int),
59952+ .mode = 0600,
59953+ .proc_handler = &proc_dointvec,
59954+ },
59955+#endif
59956+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59957+ {
58c5fc13
MT
59958+ .procname = "chroot_execlog",
59959+ .data = &grsec_enable_chroot_execlog,
59960+ .maxlen = sizeof(int),
59961+ .mode = 0600,
59962+ .proc_handler = &proc_dointvec,
59963+ },
59964+#endif
59965+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
fe2de317
MT
59966+ {
59967+ .procname = "chroot_caps",
59968+ .data = &grsec_enable_chroot_caps,
59969+ .maxlen = sizeof(int),
59970+ .mode = 0600,
59971+ .proc_handler = &proc_dointvec,
59972+ },
59973+#endif
59974+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59975+ {
59976+ .procname = "chroot_deny_sysctl",
59977+ .data = &grsec_enable_chroot_sysctl,
59978+ .maxlen = sizeof(int),
59979+ .mode = 0600,
59980+ .proc_handler = &proc_dointvec,
59981+ },
59982+#endif
59983+#ifdef CONFIG_GRKERNSEC_TPE
59984+ {
59985+ .procname = "tpe",
59986+ .data = &grsec_enable_tpe,
59987+ .maxlen = sizeof(int),
59988+ .mode = 0600,
59989+ .proc_handler = &proc_dointvec,
59990+ },
59991+ {
59992+ .procname = "tpe_gid",
59993+ .data = &grsec_tpe_gid,
59994+ .maxlen = sizeof(int),
59995+ .mode = 0600,
59996+ .proc_handler = &proc_dointvec,
59997+ },
59998+#endif
59999+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60000+ {
60001+ .procname = "tpe_invert",
60002+ .data = &grsec_enable_tpe_invert,
60003+ .maxlen = sizeof(int),
60004+ .mode = 0600,
60005+ .proc_handler = &proc_dointvec,
60006+ },
60007+#endif
60008+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60009+ {
60010+ .procname = "tpe_restrict_all",
60011+ .data = &grsec_enable_tpe_all,
60012+ .maxlen = sizeof(int),
60013+ .mode = 0600,
60014+ .proc_handler = &proc_dointvec,
60015+ },
60016+#endif
60017+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60018+ {
60019+ .procname = "socket_all",
60020+ .data = &grsec_enable_socket_all,
60021+ .maxlen = sizeof(int),
60022+ .mode = 0600,
60023+ .proc_handler = &proc_dointvec,
60024+ },
60025+ {
60026+ .procname = "socket_all_gid",
60027+ .data = &grsec_socket_all_gid,
60028+ .maxlen = sizeof(int),
60029+ .mode = 0600,
60030+ .proc_handler = &proc_dointvec,
60031+ },
60032+#endif
60033+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60034+ {
60035+ .procname = "socket_client",
60036+ .data = &grsec_enable_socket_client,
60037+ .maxlen = sizeof(int),
60038+ .mode = 0600,
60039+ .proc_handler = &proc_dointvec,
60040+ },
60041+ {
60042+ .procname = "socket_client_gid",
60043+ .data = &grsec_socket_client_gid,
60044+ .maxlen = sizeof(int),
60045+ .mode = 0600,
60046+ .proc_handler = &proc_dointvec,
60047+ },
60048+#endif
60049+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60050+ {
60051+ .procname = "socket_server",
60052+ .data = &grsec_enable_socket_server,
60053+ .maxlen = sizeof(int),
60054+ .mode = 0600,
60055+ .proc_handler = &proc_dointvec,
60056+ },
60057+ {
60058+ .procname = "socket_server_gid",
60059+ .data = &grsec_socket_server_gid,
60060+ .maxlen = sizeof(int),
60061+ .mode = 0600,
60062+ .proc_handler = &proc_dointvec,
60063+ },
60064+#endif
60065+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60066+ {
60067+ .procname = "audit_group",
60068+ .data = &grsec_enable_group,
60069+ .maxlen = sizeof(int),
60070+ .mode = 0600,
60071+ .proc_handler = &proc_dointvec,
60072+ },
60073+ {
60074+ .procname = "audit_gid",
60075+ .data = &grsec_audit_gid,
60076+ .maxlen = sizeof(int),
60077+ .mode = 0600,
60078+ .proc_handler = &proc_dointvec,
60079+ },
60080+#endif
60081+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60082+ {
60083+ .procname = "audit_chdir",
60084+ .data = &grsec_enable_chdir,
60085+ .maxlen = sizeof(int),
60086+ .mode = 0600,
60087+ .proc_handler = &proc_dointvec,
60088+ },
60089+#endif
60090+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60091+ {
60092+ .procname = "audit_mount",
60093+ .data = &grsec_enable_mount,
60094+ .maxlen = sizeof(int),
60095+ .mode = 0600,
60096+ .proc_handler = &proc_dointvec,
60097+ },
60098+#endif
60099+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60100+ {
60101+ .procname = "audit_textrel",
60102+ .data = &grsec_enable_audit_textrel,
60103+ .maxlen = sizeof(int),
60104+ .mode = 0600,
60105+ .proc_handler = &proc_dointvec,
60106+ },
60107+#endif
60108+#ifdef CONFIG_GRKERNSEC_DMESG
60109+ {
60110+ .procname = "dmesg",
60111+ .data = &grsec_enable_dmesg,
60112+ .maxlen = sizeof(int),
60113+ .mode = 0600,
60114+ .proc_handler = &proc_dointvec,
60115+ },
60116+#endif
60117+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60118+ {
60119+ .procname = "chroot_findtask",
60120+ .data = &grsec_enable_chroot_findtask,
60121+ .maxlen = sizeof(int),
60122+ .mode = 0600,
60123+ .proc_handler = &proc_dointvec,
60124+ },
60125+#endif
60126+#ifdef CONFIG_GRKERNSEC_RESLOG
60127+ {
60128+ .procname = "resource_logging",
60129+ .data = &grsec_resource_logging,
60130+ .maxlen = sizeof(int),
60131+ .mode = 0600,
60132+ .proc_handler = &proc_dointvec,
60133+ },
60134+#endif
60135+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60136+ {
60137+ .procname = "audit_ptrace",
60138+ .data = &grsec_enable_audit_ptrace,
60139+ .maxlen = sizeof(int),
60140+ .mode = 0600,
60141+ .proc_handler = &proc_dointvec,
60142+ },
60143+#endif
60144+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60145+ {
60146+ .procname = "harden_ptrace",
60147+ .data = &grsec_enable_harden_ptrace,
60148+ .maxlen = sizeof(int),
60149+ .mode = 0600,
60150+ .proc_handler = &proc_dointvec,
60151+ },
60152+#endif
60153+ {
60154+ .procname = "grsec_lock",
60155+ .data = &grsec_lock,
60156+ .maxlen = sizeof(int),
60157+ .mode = 0600,
60158+ .proc_handler = &proc_dointvec,
60159+ },
60160+#endif
60161+#ifdef CONFIG_GRKERNSEC_ROFS
60162+ {
60163+ .procname = "romount_protect",
60164+ .data = &grsec_enable_rofs,
60165+ .maxlen = sizeof(int),
60166+ .mode = 0600,
60167+ .proc_handler = &proc_dointvec_minmax,
60168+ .extra1 = &one,
60169+ .extra2 = &one,
60170+ },
60171+#endif
60172+ { }
60173+};
60174+#endif
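Every entry in this table is a plain integer toggle (proc_dointvec, mode 0600, romount_protect additionally clamped to 1 via proc_dointvec_minmax), and gr_handle_sysctl_mod() at the top of the file refuses further writes to the whole directory once grsec_lock is set. A small sketch of how these are typically driven at boot follows; the /proc/sys/kernel/grsecurity/ location is an assumption here, not something shown in this hunk, and the writes require root.

/* Sketch: enable a tunable, then lock the grsecurity sysctl directory. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
    FILE *f = fopen(path, "w");

    if (!f)
        return -1;
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    /* after grsec_lock is non-zero, gr_handle_sysctl_mod() rejects later
     * writes anywhere under the grsecurity directory with -EACCES */
    write_sysctl("/proc/sys/kernel/grsecurity/tpe", "1");
    write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
    return 0;
}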
60175diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
60176new file mode 100644
60177index 0000000..0dc13c3
60178--- /dev/null
60179+++ b/grsecurity/grsec_time.c
60180@@ -0,0 +1,16 @@
60181+#include <linux/kernel.h>
60182+#include <linux/sched.h>
60183+#include <linux/grinternal.h>
60184+#include <linux/module.h>
58c5fc13 60185+
fe2de317
MT
60186+void
60187+gr_log_timechange(void)
60188+{
60189+#ifdef CONFIG_GRKERNSEC_TIME
60190+ if (grsec_enable_time)
60191+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
60192+#endif
60193+ return;
60194+}
58c5fc13 60195+
fe2de317
MT
60196+EXPORT_SYMBOL(gr_log_timechange);
60197diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
60198new file mode 100644
4c928ab7 60199index 0000000..07e0dc0
fe2de317
MT
60200--- /dev/null
60201+++ b/grsecurity/grsec_tpe.c
4c928ab7 60202@@ -0,0 +1,73 @@
fe2de317
MT
60203+#include <linux/kernel.h>
60204+#include <linux/sched.h>
60205+#include <linux/file.h>
60206+#include <linux/fs.h>
60207+#include <linux/grinternal.h>
58c5fc13 60208+
fe2de317 60209+extern int gr_acl_tpe_check(void);
58c5fc13 60210+
fe2de317
MT
60211+int
60212+gr_tpe_allow(const struct file *file)
60213+{
60214+#ifdef CONFIG_GRKERNSEC
60215+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
60216+ const struct cred *cred = current_cred();
4c928ab7
MT
60217+ char *msg = NULL;
60218+ char *msg2 = NULL;
60219+
60220+ // never restrict root
60221+ if (!cred->uid)
60222+ return 1;
58c5fc13 60223+
4c928ab7 60224+ if (grsec_enable_tpe) {
fe2de317 60225+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
4c928ab7
MT
60226+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
60227+ msg = "not being in trusted group";
60228+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
60229+ msg = "being in untrusted group";
fe2de317 60230+#else
4c928ab7
MT
60231+ if (in_group_p(grsec_tpe_gid))
60232+ msg = "being in untrusted group";
fe2de317 60233+#endif
4c928ab7
MT
60234+ }
60235+ if (!msg && gr_acl_tpe_check())
60236+ msg = "being in untrusted role";
60237+
60238+ // not in any affected group/role
60239+ if (!msg)
60240+ goto next_check;
60241+
60242+ if (inode->i_uid)
60243+ msg2 = "file in non-root-owned directory";
60244+ else if (inode->i_mode & S_IWOTH)
60245+ msg2 = "file in world-writable directory";
60246+ else if (inode->i_mode & S_IWGRP)
60247+ msg2 = "file in group-writable directory";
60248+
60249+ if (msg && msg2) {
60250+ char fullmsg[70] = {0};
60251+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
60252+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
fe2de317
MT
60253+ return 0;
60254+ }
4c928ab7
MT
60255+ msg = NULL;
60256+next_check:
fe2de317 60257+#ifdef CONFIG_GRKERNSEC_TPE_ALL
4c928ab7
MT
60258+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
60259+ return 1;
60260+
60261+ if (inode->i_uid && (inode->i_uid != cred->uid))
60262+ msg = "directory not owned by user";
60263+ else if (inode->i_mode & S_IWOTH)
60264+ msg = "file in world-writable directory";
60265+ else if (inode->i_mode & S_IWGRP)
60266+ msg = "file in group-writable directory";
60267+
60268+ if (msg) {
60269+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
fe2de317
MT
60270+ return 0;
60271+ }
60272+#endif
60273+#endif
60274+ return 1;
60275+}
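gr_tpe_allow() never inspects the binary itself; what it tests is the parent directory's inode (d_parent->d_inode): for a user in the untrusted group (or outside the trusted group with TPE_INVERT) or in an untrusted RBAC role, execution is refused when that directory is not root-owned or is group- or world-writable, and with TPE_ALL a similar writability test plus an ownership check is applied to every non-root user. Below is a rough userspace equivalent of the directory test, using stat(2) on a path instead of the in-kernel inode and folding the two variants into a single check, so it is an approximation rather than the patch's logic.

/* Standalone sketch of the parent-directory test behind gr_tpe_allow(). */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Returns NULL if the directory passes, or a reason string if execution
 * from it would be refused for an untrusted user. */
static const char *tpe_dir_check(const char *dir, uid_t uid)
{
    struct stat st;

    if (stat(dir, &st) != 0)
        return "cannot stat directory";
    if (st.st_uid != 0 && st.st_uid != uid)
        return "directory not owned by root or the user";
    if (st.st_mode & S_IWOTH)
        return "world-writable directory";
    if (st.st_mode & S_IWGRP)
        return "group-writable directory";
    return NULL;
}

int main(void)
{
    const char *why;

    why = tpe_dir_check("/tmp", 1000);
    printf("/tmp: %s\n", why ? why : "ok");
    why = tpe_dir_check("/usr/bin", 1000);
    printf("/usr/bin: %s\n", why ? why : "ok");
    return 0;
}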
60276diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60277new file mode 100644
60278index 0000000..9f7b1ac
60279--- /dev/null
60280+++ b/grsecurity/grsum.c
60281@@ -0,0 +1,61 @@
60282+#include <linux/err.h>
60283+#include <linux/kernel.h>
60284+#include <linux/sched.h>
60285+#include <linux/mm.h>
60286+#include <linux/scatterlist.h>
60287+#include <linux/crypto.h>
60288+#include <linux/gracl.h>
58c5fc13 60289+
58c5fc13 60290+
fe2de317
MT
60291+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60292+#error "crypto and sha256 must be built into the kernel"
60293+#endif
58c5fc13 60294+
fe2de317
MT
60295+int
60296+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60297+{
60298+ char *p;
60299+ struct crypto_hash *tfm;
60300+ struct hash_desc desc;
60301+ struct scatterlist sg;
60302+ unsigned char temp_sum[GR_SHA_LEN];
60303+ volatile int retval = 0;
60304+ volatile int dummy = 0;
60305+ unsigned int i;
57199397 60306+
fe2de317 60307+ sg_init_table(&sg, 1);
57199397 60308+
fe2de317
MT
60309+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60310+ if (IS_ERR(tfm)) {
60311+ /* should never happen, since sha256 should be built in */
60312+ return 1;
60313+ }
57199397 60314+
fe2de317
MT
60315+ desc.tfm = tfm;
60316+ desc.flags = 0;
57199397 60317+
fe2de317 60318+ crypto_hash_init(&desc);
57199397 60319+
fe2de317
MT
60320+ p = salt;
60321+ sg_set_buf(&sg, p, GR_SALT_LEN);
60322+ crypto_hash_update(&desc, &sg, sg.length);
57199397 60323+
fe2de317
MT
60324+ p = entry->pw;
60325+ sg_set_buf(&sg, p, strlen(p));
60326+
60327+ crypto_hash_update(&desc, &sg, sg.length);
57199397 60328+
fe2de317 60329+ crypto_hash_final(&desc, temp_sum);
57199397 60330+
fe2de317 60331+ memset(entry->pw, 0, GR_PW_LEN);
57199397 60332+
fe2de317
MT
60333+ for (i = 0; i < GR_SHA_LEN; i++)
60334+ if (sum[i] != temp_sum[i])
60335+ retval = 1;
60336+ else
60337+ dummy = 1; // waste a cycle
15a11c5b 60338+
fe2de317 60339+ crypto_free_hash(tfm);
57199397 60340+
fe2de317
MT
60341+ return retval;
60342+}
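chkpw() recomputes SHA-256 over the salt followed by the supplied password and then compares all GR_SHA_LEN bytes of the result against the stored sum without breaking out of the loop early; the dummy branch exists only to keep the matching and mismatching paths doing comparable work, so the comparison time does not depend on how many leading bytes matched. The sketch below shows the same idea with the more common XOR-accumulate idiom rather than the patch's if/else, and omits the hashing step.

/* Standalone sketch of the fixed-time digest comparison in chkpw(). */
#include <stdio.h>
#include <string.h>

#define DIGEST_LEN 32   /* GR_SHA_LEN in the patch */

static int digests_differ(const unsigned char *a, const unsigned char *b)
{
    unsigned char acc = 0;
    int i;

    for (i = 0; i < DIGEST_LEN; i++)
        acc |= a[i] ^ b[i];     /* accumulate differences, never break out */
    return acc != 0;            /* 1 = mismatch, matching chkpw()'s retval */
}

int main(void)
{
    unsigned char x[DIGEST_LEN], y[DIGEST_LEN];

    memset(x, 0xab, sizeof(x));
    memcpy(y, x, sizeof(y));
    printf("equal:    %d\n", digests_differ(x, y));  /* 0 */
    y[DIGEST_LEN - 1] ^= 1;
    printf("mismatch: %d\n", digests_differ(x, y));  /* 1 */
    return 0;
}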
60343diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60344index 6cd5b64..f620d2d 100644
60345--- a/include/acpi/acpi_bus.h
60346+++ b/include/acpi/acpi_bus.h
15a11c5b
MT
60347@@ -107,7 +107,7 @@ struct acpi_device_ops {
60348 acpi_op_bind bind;
60349 acpi_op_unbind unbind;
60350 acpi_op_notify notify;
60351-};
60352+} __no_const;
60353
60354 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60355
fe2de317
MT
60356diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60357index b7babf0..71e4e74 100644
60358--- a/include/asm-generic/atomic-long.h
60359+++ b/include/asm-generic/atomic-long.h
ae4e228f
MT
60360@@ -22,6 +22,12 @@
60361
60362 typedef atomic64_t atomic_long_t;
60363
60364+#ifdef CONFIG_PAX_REFCOUNT
60365+typedef atomic64_unchecked_t atomic_long_unchecked_t;
60366+#else
60367+typedef atomic64_t atomic_long_unchecked_t;
60368+#endif
58c5fc13 60369+
ae4e228f 60370 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58c5fc13 60371
ae4e228f 60372 static inline long atomic_long_read(atomic_long_t *l)
fe2de317 60373@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
ae4e228f
MT
60374 return (long)atomic64_read(v);
60375 }
60376
60377+#ifdef CONFIG_PAX_REFCOUNT
60378+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60379+{
60380+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58c5fc13 60381+
ae4e228f
MT
60382+ return (long)atomic64_read_unchecked(v);
60383+}
60384+#endif
60385+
60386 static inline void atomic_long_set(atomic_long_t *l, long i)
60387 {
60388 atomic64_t *v = (atomic64_t *)l;
fe2de317 60389@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
ae4e228f
MT
60390 atomic64_set(v, i);
60391 }
58c5fc13 60392
ae4e228f
MT
60393+#ifdef CONFIG_PAX_REFCOUNT
60394+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60395+{
60396+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60397+
60398+ atomic64_set_unchecked(v, i);
60399+}
60400+#endif
60401+
60402 static inline void atomic_long_inc(atomic_long_t *l)
60403 {
60404 atomic64_t *v = (atomic64_t *)l;
fe2de317 60405@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
ae4e228f 60406 atomic64_inc(v);
58c5fc13
MT
60407 }
60408
ae4e228f
MT
60409+#ifdef CONFIG_PAX_REFCOUNT
60410+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58c5fc13 60411+{
ae4e228f
MT
60412+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60413+
60414+ atomic64_inc_unchecked(v);
58c5fc13 60415+}
ae4e228f 60416+#endif
58c5fc13 60417+
ae4e228f 60418 static inline void atomic_long_dec(atomic_long_t *l)
58c5fc13 60419 {
ae4e228f 60420 atomic64_t *v = (atomic64_t *)l;
fe2de317 60421@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
df50ba0c
MT
60422 atomic64_dec(v);
60423 }
60424
60425+#ifdef CONFIG_PAX_REFCOUNT
60426+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60427+{
60428+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60429+
60430+ atomic64_dec_unchecked(v);
60431+}
60432+#endif
60433+
60434 static inline void atomic_long_add(long i, atomic_long_t *l)
60435 {
60436 atomic64_t *v = (atomic64_t *)l;
fe2de317 60437@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
ae4e228f 60438 atomic64_add(i, v);
58c5fc13
MT
60439 }
60440
ae4e228f
MT
60441+#ifdef CONFIG_PAX_REFCOUNT
60442+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58c5fc13 60443+{
ae4e228f
MT
60444+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60445+
60446+ atomic64_add_unchecked(i, v);
58c5fc13 60447+}
ae4e228f 60448+#endif
58c5fc13 60449+
ae4e228f 60450 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 60451 {
ae4e228f 60452 atomic64_t *v = (atomic64_t *)l;
fe2de317 60453@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
6892158b
MT
60454 atomic64_sub(i, v);
60455 }
60456
60457+#ifdef CONFIG_PAX_REFCOUNT
60458+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60459+{
60460+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60461+
60462+ atomic64_sub_unchecked(i, v);
60463+}
60464+#endif
60465+
60466 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60467 {
60468 atomic64_t *v = (atomic64_t *)l;
fe2de317 60469@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
ae4e228f 60470 return (long)atomic64_inc_return(v);
58c5fc13
MT
60471 }
60472
ae4e228f
MT
60473+#ifdef CONFIG_PAX_REFCOUNT
60474+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58c5fc13 60475+{
ae4e228f
MT
60476+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60477+
60478+ return (long)atomic64_inc_return_unchecked(v);
58c5fc13 60479+}
ae4e228f 60480+#endif
58c5fc13 60481+
ae4e228f
MT
60482 static inline long atomic_long_dec_return(atomic_long_t *l)
60483 {
60484 atomic64_t *v = (atomic64_t *)l;
fe2de317 60485@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
ae4e228f
MT
60486
60487 typedef atomic_t atomic_long_t;
60488
60489+#ifdef CONFIG_PAX_REFCOUNT
60490+typedef atomic_unchecked_t atomic_long_unchecked_t;
60491+#else
60492+typedef atomic_t atomic_long_unchecked_t;
60493+#endif
60494+
60495 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60496 static inline long atomic_long_read(atomic_long_t *l)
60497 {
fe2de317 60498@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
ae4e228f
MT
60499 return (long)atomic_read(v);
60500 }
60501
60502+#ifdef CONFIG_PAX_REFCOUNT
60503+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60504+{
60505+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60506+
60507+ return (long)atomic_read_unchecked(v);
60508+}
60509+#endif
60510+
60511 static inline void atomic_long_set(atomic_long_t *l, long i)
60512 {
60513 atomic_t *v = (atomic_t *)l;
fe2de317 60514@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
ae4e228f
MT
60515 atomic_set(v, i);
60516 }
60517
60518+#ifdef CONFIG_PAX_REFCOUNT
60519+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60520+{
60521+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60522+
60523+ atomic_set_unchecked(v, i);
60524+}
60525+#endif
60526+
60527 static inline void atomic_long_inc(atomic_long_t *l)
60528 {
60529 atomic_t *v = (atomic_t *)l;
fe2de317 60530@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
ae4e228f
MT
60531 atomic_inc(v);
60532 }
60533
60534+#ifdef CONFIG_PAX_REFCOUNT
60535+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60536+{
60537+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60538+
60539+ atomic_inc_unchecked(v);
60540+}
60541+#endif
60542+
60543 static inline void atomic_long_dec(atomic_long_t *l)
60544 {
60545 atomic_t *v = (atomic_t *)l;
fe2de317 60546@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
df50ba0c
MT
60547 atomic_dec(v);
60548 }
60549
60550+#ifdef CONFIG_PAX_REFCOUNT
60551+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60552+{
60553+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60554+
60555+ atomic_dec_unchecked(v);
60556+}
60557+#endif
60558+
60559 static inline void atomic_long_add(long i, atomic_long_t *l)
60560 {
60561 atomic_t *v = (atomic_t *)l;
fe2de317 60562@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
ae4e228f
MT
60563 atomic_add(i, v);
60564 }
60565
60566+#ifdef CONFIG_PAX_REFCOUNT
60567+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60568+{
60569+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60570+
60571+ atomic_add_unchecked(i, v);
60572+}
60573+#endif
60574+
60575 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 60576 {
ae4e228f 60577 atomic_t *v = (atomic_t *)l;
fe2de317 60578@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
6892158b
MT
60579 atomic_sub(i, v);
60580 }
60581
60582+#ifdef CONFIG_PAX_REFCOUNT
60583+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60584+{
60585+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60586+
60587+ atomic_sub_unchecked(i, v);
60588+}
60589+#endif
60590+
60591 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60592 {
60593 atomic_t *v = (atomic_t *)l;
fe2de317 60594@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
ae4e228f
MT
60595 return (long)atomic_inc_return(v);
60596 }
60597
60598+#ifdef CONFIG_PAX_REFCOUNT
60599+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60600+{
60601+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60602+
60603+ return (long)atomic_inc_return_unchecked(v);
60604+}
60605+#endif
60606+
60607 static inline long atomic_long_dec_return(atomic_long_t *l)
60608 {
60609 atomic_t *v = (atomic_t *)l;
fe2de317 60610@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
ae4e228f
MT
60611
60612 #endif /* BITS_PER_LONG == 64 */
60613
60614+#ifdef CONFIG_PAX_REFCOUNT
60615+static inline void pax_refcount_needs_these_functions(void)
60616+{
60617+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
60618+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60619+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60620+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60621+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
15a11c5b 60622+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57199397 60623+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
6892158b 60624+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
8308f9c9
MT
60625+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60626+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
15a11c5b 60627+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
ae4e228f
MT
60628+
60629+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60630+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60631+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
6892158b 60632+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
ae4e228f
MT
60633+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60634+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
df50ba0c 60635+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
ae4e228f
MT
60636+}
60637+#else
60638+#define atomic_read_unchecked(v) atomic_read(v)
60639+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60640+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60641+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60642+#define atomic_inc_unchecked(v) atomic_inc(v)
66a7e928 60643+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57199397 60644+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
6892158b 60645+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
8308f9c9
MT
60646+#define atomic_dec_unchecked(v) atomic_dec(v)
60647+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60648+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
ae4e228f
MT
60649+
60650+#define atomic_long_read_unchecked(v) atomic_long_read(v)
60651+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60652+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
6892158b 60653+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
ae4e228f
MT
60654+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60655+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
df50ba0c 60656+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
ae4e228f
MT
60657+#endif
60658+
60659 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
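/*
 * Illustrative only, not part of the patch: with the declarations added
 * above, counters that are allowed to wrap (statistics, sequence numbers)
 * use the *_unchecked API so PAX_REFCOUNT's overflow detection stays out
 * of the way, while real reference counts keep the checked atomic_long_t.
 * The structure and function below are hypothetical examples.
 */
struct example_stats {
	atomic_long_t           refs;        /* checked: an overflow here is a bug */
	atomic_long_unchecked_t tx_packets;  /* unchecked: free-running counter */
};

static inline void example_account_tx(struct example_stats *st)
{
	atomic_long_inc(&st->refs);                 /* overflow would be detected */
	atomic_long_inc_unchecked(&st->tx_packets); /* may wrap silently */
	atomic_long_dec(&st->refs);
}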
fe2de317
MT
60660diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60661index b18ce4f..2ee2843 100644
60662--- a/include/asm-generic/atomic64.h
60663+++ b/include/asm-generic/atomic64.h
60664@@ -16,6 +16,8 @@ typedef struct {
60665 long long counter;
60666 } atomic64_t;
60667
60668+typedef atomic64_t atomic64_unchecked_t;
60669+
60670 #define ATOMIC64_INIT(i) { (i) }
60671
60672 extern long long atomic64_read(const atomic64_t *v);
60673@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60674 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60675 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60676
60677+#define atomic64_read_unchecked(v) atomic64_read(v)
60678+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60679+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60680+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60681+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60682+#define atomic64_inc_unchecked(v) atomic64_inc(v)
60683+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60684+#define atomic64_dec_unchecked(v) atomic64_dec(v)
60685+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60686+
60687 #endif /* _ASM_GENERIC_ATOMIC64_H */
60688diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60689index 1bfcfe5..e04c5c9 100644
60690--- a/include/asm-generic/cache.h
60691+++ b/include/asm-generic/cache.h
8308f9c9
MT
60692@@ -6,7 +6,7 @@
60693 * cache lines need to provide their own cache.h.
60694 */
60695
60696-#define L1_CACHE_SHIFT 5
60697-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15a11c5b
MT
60698+#define L1_CACHE_SHIFT 5UL
60699+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
8308f9c9
MT
60700
60701 #endif /* __ASM_GENERIC_CACHE_H */
4c928ab7
MT
60702diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60703index 0d68a1e..b74a761 100644
60704--- a/include/asm-generic/emergency-restart.h
60705+++ b/include/asm-generic/emergency-restart.h
60706@@ -1,7 +1,7 @@
60707 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60708 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60709
60710-static inline void machine_emergency_restart(void)
60711+static inline __noreturn void machine_emergency_restart(void)
60712 {
60713 machine_restart(NULL);
60714 }
fe2de317
MT
60715diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60716index 0232ccb..13d9165 100644
60717--- a/include/asm-generic/kmap_types.h
60718+++ b/include/asm-generic/kmap_types.h
57199397 60719@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
ae4e228f
MT
60720 KMAP_D(17) KM_NMI,
60721 KMAP_D(18) KM_NMI_PTE,
57199397
MT
60722 KMAP_D(19) KM_KDB,
60723+KMAP_D(20) KM_CLEARPAGE,
60724 /*
60725 * Remember to update debug_kmap_atomic() when adding new kmap types!
60726 */
60727-KMAP_D(20) KM_TYPE_NR
60728+KMAP_D(21) KM_TYPE_NR
58c5fc13
MT
60729 };
60730
ae4e228f 60731 #undef KMAP_D
4c928ab7
MT
60732diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60733index 9ceb03b..2efbcbd 100644
60734--- a/include/asm-generic/local.h
60735+++ b/include/asm-generic/local.h
60736@@ -39,6 +39,7 @@ typedef struct
60737 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60738 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60739 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60740+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60741
60742 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60743 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
fe2de317
MT
60744diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60745index 725612b..9cc513a 100644
60746--- a/include/asm-generic/pgtable-nopmd.h
60747+++ b/include/asm-generic/pgtable-nopmd.h
57199397
MT
60748@@ -1,14 +1,19 @@
60749 #ifndef _PGTABLE_NOPMD_H
60750 #define _PGTABLE_NOPMD_H
60751
60752-#ifndef __ASSEMBLY__
60753-
60754 #include <asm-generic/pgtable-nopud.h>
60755
60756-struct mm_struct;
60757-
60758 #define __PAGETABLE_PMD_FOLDED
60759
60760+#define PMD_SHIFT PUD_SHIFT
60761+#define PTRS_PER_PMD 1
60762+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60763+#define PMD_MASK (~(PMD_SIZE-1))
60764+
60765+#ifndef __ASSEMBLY__
60766+
60767+struct mm_struct;
60768+
60769 /*
60770 * Having the pmd type consist of a pud gets the size right, and allows
60771 * us to conceptually access the pud entry that this pmd is folded into
60772@@ -16,11 +21,6 @@ struct mm_struct;
60773 */
60774 typedef struct { pud_t pud; } pmd_t;
60775
60776-#define PMD_SHIFT PUD_SHIFT
60777-#define PTRS_PER_PMD 1
60778-#define PMD_SIZE (1UL << PMD_SHIFT)
60779-#define PMD_MASK (~(PMD_SIZE-1))
60780-
60781 /*
60782 * The "pud_xxx()" functions here are trivial for a folded two-level
60783 * setup: the pmd is never bad, and a pmd always exists (as it's folded
fe2de317
MT
60784diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60785index 810431d..ccc3638 100644
60786--- a/include/asm-generic/pgtable-nopud.h
60787+++ b/include/asm-generic/pgtable-nopud.h
57199397
MT
60788@@ -1,10 +1,15 @@
60789 #ifndef _PGTABLE_NOPUD_H
60790 #define _PGTABLE_NOPUD_H
60791
60792-#ifndef __ASSEMBLY__
60793-
60794 #define __PAGETABLE_PUD_FOLDED
60795
60796+#define PUD_SHIFT PGDIR_SHIFT
60797+#define PTRS_PER_PUD 1
60798+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60799+#define PUD_MASK (~(PUD_SIZE-1))
60800+
60801+#ifndef __ASSEMBLY__
60802+
60803 /*
60804 * Having the pud type consist of a pgd gets the size right, and allows
60805 * us to conceptually access the pgd entry that this pud is folded into
60806@@ -12,11 +17,6 @@
60807 */
60808 typedef struct { pgd_t pgd; } pud_t;
60809
60810-#define PUD_SHIFT PGDIR_SHIFT
60811-#define PTRS_PER_PUD 1
60812-#define PUD_SIZE (1UL << PUD_SHIFT)
60813-#define PUD_MASK (~(PUD_SIZE-1))
60814-
60815 /*
60816 * The "pgd_xxx()" functions here are trivial for a folded two-level
60817 * setup: the pud is never bad, and a pud always exists (as it's folded
fe2de317 60818diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
5e856224 60819index a03c098..7e5b223 100644
fe2de317
MT
60820--- a/include/asm-generic/pgtable.h
60821+++ b/include/asm-generic/pgtable.h
5e856224 60822@@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
fe2de317 60823 #endif
5e856224 60824 }
fe2de317
MT
60825
60826+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60827+static inline unsigned long pax_open_kernel(void) { return 0; }
60828+#endif
60829+
60830+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60831+static inline unsigned long pax_close_kernel(void) { return 0; }
60832+#endif
60833+
5e856224 60834 #endif /* CONFIG_MMU */
fe2de317 60835
5e856224 60836 #endif /* !__ASSEMBLY__ */
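/*
 * Illustrative only, not part of the patch: kernel code that needs to
 * modify data PaX keeps read-only brackets the write between
 * pax_open_kernel() and pax_close_kernel(); on architectures without
 * __HAVE_ARCH_PAX_OPEN_KERNEL the stubs above turn this into a no-op.
 * 'example_table' is hypothetical; __read_only comes from the cache.h
 * hunk further down.
 */
static int example_table[4] __read_only = { 1, 2, 3, 4 };

static void example_patch_entry(int idx, int val)
{
	pax_open_kernel();        /* temporarily allow writing the entry */
	example_table[idx] = val;
	pax_close_kernel();       /* restore the read-only protection */
}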
4c928ab7 60837diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
5e856224 60838index 9788568..510dece 100644
4c928ab7
MT
60839--- a/include/asm-generic/uaccess.h
60840+++ b/include/asm-generic/uaccess.h
60841@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
60842 */
60843 #ifndef __copy_from_user
60844 static inline __must_check long __copy_from_user(void *to,
60845+ const void __user * from, unsigned long n) __size_overflow(3);
60846+static inline __must_check long __copy_from_user(void *to,
60847 const void __user * from, unsigned long n)
60848 {
60849 if (__builtin_constant_p(n)) {
60850@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
60851
60852 #ifndef __copy_to_user
60853 static inline __must_check long __copy_to_user(void __user *to,
60854+ const void *from, unsigned long n) __size_overflow(3);
60855+static inline __must_check long __copy_to_user(void __user *to,
60856 const void *from, unsigned long n)
60857 {
60858 if (__builtin_constant_p(n)) {
60859@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
60860 -EFAULT; \
60861 })
60862
60863+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
60864 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
60865 {
60866 size = __copy_from_user(x, ptr, size);
60867@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
60868 #define __copy_to_user_inatomic __copy_to_user
60869 #endif
60870
60871+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
60872 static inline long copy_from_user(void *to,
60873 const void __user * from, unsigned long n)
60874 {
60875@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
60876 return n;
60877 }
60878
60879+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
60880 static inline long copy_to_user(void __user *to,
60881 const void *from, unsigned long n)
60882 {
5e856224 60883@@ -314,6 +321,8 @@ static inline long strlen_user(const char __user *src)
4c928ab7
MT
60884 */
60885 #ifndef __clear_user
60886 static inline __must_check unsigned long
60887+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
60888+static inline __must_check unsigned long
60889 __clear_user(void __user *to, unsigned long n)
60890 {
60891 memset((void __force *)to, 0, n);
5e856224 60892@@ -322,6 +331,8 @@ __clear_user(void __user *to, unsigned long n)
4c928ab7
MT
60893 #endif
60894
60895 static inline __must_check unsigned long
60896+clear_user(void __user *to, unsigned long n) __size_overflow(2);
60897+static inline __must_check unsigned long
60898 clear_user(void __user *to, unsigned long n)
60899 {
60900 might_sleep();
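/*
 * Illustrative only, not part of the patch: the hunk above prepends a
 * duplicate prototype carrying __size_overflow() so the size_overflow gcc
 * plugin can instrument the size argument; without SIZE_OVERFLOW_PLUGIN
 * the attribute expands to nothing (see the compiler-gcc4.h and
 * compiler.h hunks below).  The helper here is hypothetical.
 */
static inline long example_read_user(void *dst, const void __user *src,
				     unsigned long len) __size_overflow(3);
static inline long example_read_user(void *dst, const void __user *src,
				     unsigned long len)
{
	return copy_from_user(dst, src, len);	/* 'len' is the instrumented argument */
}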
fe2de317 60901diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
4c928ab7 60902index b5e2e4c..6a5373e 100644
fe2de317
MT
60903--- a/include/asm-generic/vmlinux.lds.h
60904+++ b/include/asm-generic/vmlinux.lds.h
15a11c5b 60905@@ -217,6 +217,7 @@
58c5fc13
MT
60906 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60907 VMLINUX_SYMBOL(__start_rodata) = .; \
60908 *(.rodata) *(.rodata.*) \
57199397 60909+ *(.data..read_only) \
58c5fc13 60910 *(__vermagic) /* Kernel version magic */ \
16454cff
MT
60911 . = ALIGN(8); \
60912 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
4c928ab7 60913@@ -722,17 +723,18 @@
58c5fc13
MT
60914 * section in the linker script will go there too. @phdr should have
60915 * a leading colon.
60916 *
60917- * Note that this macros defines __per_cpu_load as an absolute symbol.
60918+ * Note that this macros defines per_cpu_load as an absolute symbol.
60919 * If there is no need to put the percpu section at a predetermined
15a11c5b 60920 * address, use PERCPU_SECTION.
58c5fc13 60921 */
66a7e928 60922 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
58c5fc13 60923- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57199397 60924- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
58c5fc13 60925+ per_cpu_load = .; \
57199397 60926+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
58c5fc13
MT
60927 - LOAD_OFFSET) { \
60928+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
15a11c5b 60929 PERCPU_INPUT(cacheline) \
58c5fc13 60930 } phdr \
57199397
MT
60931- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60932+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
58c5fc13
MT
60933
60934 /**
15a11c5b 60935 * PERCPU_SECTION - define output section for percpu area, simple version
fe2de317 60936diff --git a/include/drm/drmP.h b/include/drm/drmP.h
5e856224 60937index 92f0981..d44a37c 100644
fe2de317
MT
60938--- a/include/drm/drmP.h
60939+++ b/include/drm/drmP.h
4c928ab7 60940@@ -72,6 +72,7 @@
c52201e0
MT
60941 #include <linux/workqueue.h>
60942 #include <linux/poll.h>
60943 #include <asm/pgalloc.h>
60944+#include <asm/local.h>
60945 #include "drm.h"
60946
60947 #include <linux/idr.h>
4c928ab7 60948@@ -1038,7 +1039,7 @@ struct drm_device {
57199397
MT
60949
60950 /** \name Usage Counters */
60951 /*@{ */
60952- int open_count; /**< Outstanding files open */
c52201e0 60953+ local_t open_count; /**< Outstanding files open */
57199397
MT
60954 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60955 atomic_t vma_count; /**< Outstanding vma areas open */
60956 int buf_use; /**< Buffers in use -- cannot alloc */
4c928ab7 60957@@ -1049,7 +1050,7 @@ struct drm_device {
57199397
MT
60958 /*@{ */
60959 unsigned long counters;
60960 enum drm_stat_type types[15];
60961- atomic_t counts[15];
60962+ atomic_unchecked_t counts[15];
60963 /*@} */
60964
60965 struct list_head filelist;
fe2de317 60966diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
5e856224 60967index 37515d1..34fa8b0 100644
fe2de317
MT
60968--- a/include/drm/drm_crtc_helper.h
60969+++ b/include/drm/drm_crtc_helper.h
60970@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60971
60972 /* disable crtc when not in use - more explicit than dpms off */
60973 void (*disable)(struct drm_crtc *crtc);
60974-};
60975+} __no_const;
60976
60977 struct drm_encoder_helper_funcs {
60978 void (*dpms)(struct drm_encoder *encoder, int mode);
60979@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60980 struct drm_connector *connector);
60981 /* disable encoder when not in use - more explicit than dpms off */
60982 void (*disable)(struct drm_encoder *encoder);
60983-};
60984+} __no_const;
60985
60986 struct drm_connector_helper_funcs {
60987 int (*get_modes)(struct drm_connector *connector);
60988diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60989index 26c1f78..6722682 100644
60990--- a/include/drm/ttm/ttm_memory.h
60991+++ b/include/drm/ttm/ttm_memory.h
15a11c5b
MT
60992@@ -47,7 +47,7 @@
60993
60994 struct ttm_mem_shrink {
60995 int (*do_shrink) (struct ttm_mem_shrink *);
60996-};
60997+} __no_const;
60998
60999 /**
61000 * struct ttm_mem_global - Global memory accounting structure.
fe2de317
MT
61001diff --git a/include/linux/a.out.h b/include/linux/a.out.h
61002index e86dfca..40cc55f 100644
61003--- a/include/linux/a.out.h
61004+++ b/include/linux/a.out.h
58c5fc13
MT
61005@@ -39,6 +39,14 @@ enum machine_type {
61006 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
61007 };
61008
61009+/* Constants for the N_FLAGS field */
61010+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61011+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
61012+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
61013+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
61014+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61015+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61016+
61017 #if !defined (N_MAGIC)
61018 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
61019 #endif
fe2de317 61020diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
5e856224 61021index f4ff882..84b53a6 100644
fe2de317
MT
61022--- a/include/linux/atmdev.h
61023+++ b/include/linux/atmdev.h
58c5fc13
MT
61024@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
61025 #endif
61026
61027 struct k_atm_aal_stats {
61028-#define __HANDLE_ITEM(i) atomic_t i
61029+#define __HANDLE_ITEM(i) atomic_unchecked_t i
61030 __AAL_STAT_ITEMS
61031 #undef __HANDLE_ITEM
61032 };
fe2de317 61033diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
5e856224 61034index 0092102..8a801b4 100644
fe2de317
MT
61035--- a/include/linux/binfmts.h
61036+++ b/include/linux/binfmts.h
5e856224 61037@@ -89,6 +89,7 @@ struct linux_binfmt {
58c5fc13
MT
61038 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
61039 int (*load_shlib)(struct file *);
ae4e228f 61040 int (*core_dump)(struct coredump_params *cprm);
58c5fc13
MT
61041+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
61042 unsigned long min_coredump; /* minimal dump size */
58c5fc13 61043 };
16454cff 61044
fe2de317 61045diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5e856224 61046index 606cf33..b72c577 100644
fe2de317
MT
61047--- a/include/linux/blkdev.h
61048+++ b/include/linux/blkdev.h
5e856224 61049@@ -1379,7 +1379,7 @@ struct block_device_operations {
57199397 61050 /* this callback is with swap_lock and sometimes page table lock held */
15a11c5b
MT
61051 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
61052 struct module *owner;
61053-};
61054+} __do_const;
ae4e228f
MT
61055
61056 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
15a11c5b 61057 unsigned long);
fe2de317 61058diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
4c928ab7 61059index 4d1a074..88f929a 100644
fe2de317
MT
61060--- a/include/linux/blktrace_api.h
61061+++ b/include/linux/blktrace_api.h
6e9df6a3 61062@@ -162,7 +162,7 @@ struct blk_trace {
8308f9c9
MT
61063 struct dentry *dir;
61064 struct dentry *dropped_file;
61065 struct dentry *msg_file;
61066- atomic_t dropped;
61067+ atomic_unchecked_t dropped;
61068 };
61069
61070 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
fe2de317
MT
61071diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
61072index 83195fb..0b0f77d 100644
61073--- a/include/linux/byteorder/little_endian.h
61074+++ b/include/linux/byteorder/little_endian.h
bc901d79
MT
61075@@ -42,51 +42,51 @@
61076
61077 static inline __le64 __cpu_to_le64p(const __u64 *p)
61078 {
61079- return (__force __le64)*p;
61080+ return (__force const __le64)*p;
61081 }
61082 static inline __u64 __le64_to_cpup(const __le64 *p)
61083 {
61084- return (__force __u64)*p;
61085+ return (__force const __u64)*p;
61086 }
61087 static inline __le32 __cpu_to_le32p(const __u32 *p)
61088 {
61089- return (__force __le32)*p;
61090+ return (__force const __le32)*p;
61091 }
61092 static inline __u32 __le32_to_cpup(const __le32 *p)
61093 {
61094- return (__force __u32)*p;
61095+ return (__force const __u32)*p;
61096 }
61097 static inline __le16 __cpu_to_le16p(const __u16 *p)
61098 {
61099- return (__force __le16)*p;
61100+ return (__force const __le16)*p;
61101 }
61102 static inline __u16 __le16_to_cpup(const __le16 *p)
61103 {
61104- return (__force __u16)*p;
61105+ return (__force const __u16)*p;
61106 }
61107 static inline __be64 __cpu_to_be64p(const __u64 *p)
61108 {
61109- return (__force __be64)__swab64p(p);
61110+ return (__force const __be64)__swab64p(p);
61111 }
61112 static inline __u64 __be64_to_cpup(const __be64 *p)
61113 {
61114- return __swab64p((__u64 *)p);
61115+ return __swab64p((const __u64 *)p);
61116 }
61117 static inline __be32 __cpu_to_be32p(const __u32 *p)
61118 {
61119- return (__force __be32)__swab32p(p);
61120+ return (__force const __be32)__swab32p(p);
61121 }
61122 static inline __u32 __be32_to_cpup(const __be32 *p)
61123 {
61124- return __swab32p((__u32 *)p);
61125+ return __swab32p((const __u32 *)p);
61126 }
61127 static inline __be16 __cpu_to_be16p(const __u16 *p)
61128 {
61129- return (__force __be16)__swab16p(p);
61130+ return (__force const __be16)__swab16p(p);
61131 }
61132 static inline __u16 __be16_to_cpup(const __be16 *p)
61133 {
61134- return __swab16p((__u16 *)p);
61135+ return __swab16p((const __u16 *)p);
61136 }
61137 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
61138 #define __le64_to_cpus(x) do { (void)(x); } while (0)
fe2de317
MT
61139diff --git a/include/linux/cache.h b/include/linux/cache.h
61140index 4c57065..4307975 100644
61141--- a/include/linux/cache.h
61142+++ b/include/linux/cache.h
58c5fc13
MT
61143@@ -16,6 +16,10 @@
61144 #define __read_mostly
61145 #endif
61146
61147+#ifndef __read_only
61148+#define __read_only __read_mostly
61149+#endif
61150+
61151 #ifndef ____cacheline_aligned
61152 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
61153 #endif
fe2de317 61154diff --git a/include/linux/capability.h b/include/linux/capability.h
5e856224 61155index 12d52de..b5f7fa7 100644
fe2de317
MT
61156--- a/include/linux/capability.h
61157+++ b/include/linux/capability.h
5e856224
MT
61158@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
61159 extern bool capable(int cap);
66a7e928 61160 extern bool ns_capable(struct user_namespace *ns, int cap);
66a7e928 61161 extern bool nsown_capable(int cap);
66a7e928 61162+extern bool capable_nolog(int cap);
5e856224 61163+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58c5fc13
MT
61164
61165 /* audit system wants to get cap info from files as well */
66a7e928 61166 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
fe2de317
MT
61167diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
61168index 04ffb2e..6799180 100644
61169--- a/include/linux/cleancache.h
61170+++ b/include/linux/cleancache.h
15a11c5b
MT
61171@@ -31,7 +31,7 @@ struct cleancache_ops {
61172 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
61173 void (*flush_inode)(int, struct cleancache_filekey);
61174 void (*flush_fs)(int);
61175-};
61176+} __no_const;
61177
61178 extern struct cleancache_ops
61179 cleancache_register_ops(struct cleancache_ops *ops);
fe2de317 61180diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
5e856224 61181index 2f40791..567b215 100644
fe2de317
MT
61182--- a/include/linux/compiler-gcc4.h
61183+++ b/include/linux/compiler-gcc4.h
5e856224
MT
61184@@ -32,6 +32,15 @@
61185 #define __linktime_error(message) __attribute__((__error__(message)))
15a11c5b
MT
61186
61187 #if __GNUC_MINOR__ >= 5
61188+
61189+#ifdef CONSTIFY_PLUGIN
61190+#define __no_const __attribute__((no_const))
61191+#define __do_const __attribute__((do_const))
61192+#endif
61193+
4c928ab7
MT
61194+#ifdef SIZE_OVERFLOW_PLUGIN
61195+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
61196+#endif
15a11c5b
MT
61197 /*
61198 * Mark a position in code as unreachable. This can be used to
61199 * suppress control flow warnings after asm blocks that transfer
5e856224 61200@@ -47,6 +56,11 @@
66a7e928 61201 #define __noclone __attribute__((__noclone__))
57199397 61202
ae4e228f 61203 #endif
66a7e928 61204+
58c5fc13
MT
61205+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
61206+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
61207+#define __bos0(ptr) __bos((ptr), 0)
61208+#define __bos1(ptr) __bos((ptr), 1)
61209 #endif
ae4e228f
MT
61210
61211 #if __GNUC_MINOR__ > 0
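/*
 * Illustrative only, not part of the patch: under CONSTIFY_PLUGIN the two
 * attributes defined above steer the constify gcc plugin.  Marking a
 * structure __do_const asks the plugin to treat its instances as const
 * (so they end up in read-only data, as done for file_operations below),
 * while __no_const exempts a function-pointer structure that must remain
 * writable at runtime.  Both structures here are hypothetical examples.
 */
struct example_ops {
	int  (*start)(void *priv);
	void (*stop)(void *priv);
} __no_const;	/* filled in at runtime, must stay writable */

struct example_const_ops {
	int  (*probe)(void *dev);
	void (*remove)(void *dev);
} __do_const;	/* instances are constified into read-only data */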
fe2de317 61212diff --git a/include/linux/compiler.h b/include/linux/compiler.h
5e856224 61213index 4a24354..ecaff7a 100644
fe2de317
MT
61214--- a/include/linux/compiler.h
61215+++ b/include/linux/compiler.h
6e9df6a3
MT
61216@@ -5,31 +5,62 @@
61217
61218 #ifdef __CHECKER__
61219 # define __user __attribute__((noderef, address_space(1)))
61220+# define __force_user __force __user
61221 # define __kernel __attribute__((address_space(0)))
61222+# define __force_kernel __force __kernel
61223 # define __safe __attribute__((safe))
61224 # define __force __attribute__((force))
61225 # define __nocast __attribute__((nocast))
61226 # define __iomem __attribute__((noderef, address_space(2)))
61227+# define __force_iomem __force __iomem
61228 # define __acquires(x) __attribute__((context(x,0,1)))
61229 # define __releases(x) __attribute__((context(x,1,0)))
61230 # define __acquire(x) __context__(x,1)
61231 # define __release(x) __context__(x,-1)
61232 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61233 # define __percpu __attribute__((noderef, address_space(3)))
61234+# define __force_percpu __force __percpu
61235 #ifdef CONFIG_SPARSE_RCU_POINTER
61236 # define __rcu __attribute__((noderef, address_space(4)))
61237+# define __force_rcu __force __rcu
61238 #else
61239 # define __rcu
61240+# define __force_rcu
61241 #endif
61242 extern void __chk_user_ptr(const volatile void __user *);
61243 extern void __chk_io_ptr(const volatile void __iomem *);
61244+#elif defined(CHECKER_PLUGIN)
61245+//# define __user
61246+//# define __force_user
61247+//# define __kernel
61248+//# define __force_kernel
61249+# define __safe
61250+# define __force
61251+# define __nocast
61252+# define __iomem
61253+# define __force_iomem
61254+# define __chk_user_ptr(x) (void)0
61255+# define __chk_io_ptr(x) (void)0
61256+# define __builtin_warning(x, y...) (1)
61257+# define __acquires(x)
61258+# define __releases(x)
61259+# define __acquire(x) (void)0
61260+# define __release(x) (void)0
61261+# define __cond_lock(x,c) (c)
61262+# define __percpu
61263+# define __force_percpu
61264+# define __rcu
61265+# define __force_rcu
61266 #else
61267 # define __user
61268+# define __force_user
61269 # define __kernel
61270+# define __force_kernel
61271 # define __safe
61272 # define __force
61273 # define __nocast
61274 # define __iomem
61275+# define __force_iomem
61276 # define __chk_user_ptr(x) (void)0
61277 # define __chk_io_ptr(x) (void)0
61278 # define __builtin_warning(x, y...) (1)
fe2de317 61279@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
6e9df6a3
MT
61280 # define __release(x) (void)0
61281 # define __cond_lock(x,c) (c)
61282 # define __percpu
61283+# define __force_percpu
61284 # define __rcu
61285+# define __force_rcu
61286 #endif
61287
61288 #ifdef __KERNEL__
4c928ab7 61289@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
15a11c5b
MT
61290 # define __attribute_const__ /* unimplemented */
61291 #endif
61292
61293+#ifndef __no_const
61294+# define __no_const
61295+#endif
61296+
61297+#ifndef __do_const
61298+# define __do_const
61299+#endif
61300+
4c928ab7
MT
61301+#ifndef __size_overflow
61302+# define __size_overflow(...)
61303+#endif
15a11c5b
MT
61304 /*
61305 * Tell gcc if a function is cold. The compiler will assume any path
61306 * directly leading to the call is unlikely.
4c928ab7 61307@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58c5fc13
MT
61308 #define __cold
61309 #endif
61310
61311+#ifndef __alloc_size
15a11c5b 61312+#define __alloc_size(...)
58c5fc13
MT
61313+#endif
61314+
61315+#ifndef __bos
15a11c5b 61316+#define __bos(ptr, arg)
58c5fc13
MT
61317+#endif
61318+
61319+#ifndef __bos0
15a11c5b 61320+#define __bos0(ptr)
58c5fc13
MT
61321+#endif
61322+
61323+#ifndef __bos1
15a11c5b 61324+#define __bos1(ptr)
58c5fc13
MT
61325+#endif
61326+
61327 /* Simple shorthand for a section definition */
61328 #ifndef __section
61329 # define __section(S) __attribute__ ((__section__(#S)))
5e856224 61330@@ -308,6 +368,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
bc901d79
MT
61331 * use is to mediate communication between process-level code and irq/NMI
61332 * handlers, all running on the same CPU.
61333 */
61334-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61335+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61336+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61337
61338 #endif /* __LINUX_COMPILER_H */
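/*
 * Illustrative only, not part of the patch: ACCESS_ONCE() now reads
 * through a const-qualified pointer and can no longer be used as the
 * target of an assignment; stores must switch to ACCESS_ONCE_RW(), which
 * is exactly what the cpuset.h hunk below does.  'example_flag' is a
 * hypothetical variable.
 */
static int example_flag;

static void example_update(void)
{
	if (ACCESS_ONCE(example_flag) == 0)		/* read: unchanged callers keep working */
		ACCESS_ONCE_RW(example_flag) = 1;	/* write: needs the _RW variant */
}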
fe2de317
MT
61339diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
61340index e9eaec5..bfeb9bb 100644
61341--- a/include/linux/cpuset.h
61342+++ b/include/linux/cpuset.h
61343@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
bc901d79
MT
61344 * nodemask.
61345 */
61346 smp_mb();
61347- --ACCESS_ONCE(current->mems_allowed_change_disable);
61348+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
61349 }
61350
61351 static inline void set_mems_allowed(nodemask_t nodemask)
4c928ab7
MT
61352diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
61353index b936763..48685ee 100644
61354--- a/include/linux/crash_dump.h
61355+++ b/include/linux/crash_dump.h
61356@@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr;
61357 extern unsigned long long elfcorehdr_size;
61358
61359 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
61360- unsigned long, int);
61361+ unsigned long, int) __size_overflow(3);
61362
61363 /* Architecture code defines this if there are other possible ELF
61364 * machine types, e.g. on bi-arch capable hardware. */
61365diff --git a/include/linux/cred.h b/include/linux/cred.h
5e856224 61366index adadf71..6af5560 100644
4c928ab7
MT
61367--- a/include/linux/cred.h
61368+++ b/include/linux/cred.h
61369@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61370 static inline void validate_process_creds(void)
61371 {
61372 }
61373+static inline void validate_task_creds(struct task_struct *task)
61374+{
61375+}
61376 #endif
61377
61378 /**
fe2de317 61379diff --git a/include/linux/crypto.h b/include/linux/crypto.h
4c928ab7 61380index 8a94217..15d49e3 100644
fe2de317
MT
61381--- a/include/linux/crypto.h
61382+++ b/include/linux/crypto.h
4c928ab7 61383@@ -365,7 +365,7 @@ struct cipher_tfm {
15a11c5b
MT
61384 const u8 *key, unsigned int keylen);
61385 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61386 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61387-};
61388+} __no_const;
61389
61390 struct hash_tfm {
61391 int (*init)(struct hash_desc *desc);
4c928ab7 61392@@ -386,13 +386,13 @@ struct compress_tfm {
15a11c5b
MT
61393 int (*cot_decompress)(struct crypto_tfm *tfm,
61394 const u8 *src, unsigned int slen,
61395 u8 *dst, unsigned int *dlen);
61396-};
61397+} __no_const;
61398
61399 struct rng_tfm {
61400 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61401 unsigned int dlen);
61402 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61403-};
61404+} __no_const;
61405
61406 #define crt_ablkcipher crt_u.ablkcipher
61407 #define crt_aead crt_u.aead
fe2de317
MT
61408diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61409index 7925bf0..d5143d2 100644
61410--- a/include/linux/decompress/mm.h
61411+++ b/include/linux/decompress/mm.h
16454cff 61412@@ -77,7 +77,7 @@ static void free(void *where)
58c5fc13
MT
61413 * warnings when not needed (indeed large_malloc / large_free are not
61414 * needed by inflate */
61415
61416-#define malloc(a) kmalloc(a, GFP_KERNEL)
61417+#define malloc(a) kmalloc((a), GFP_KERNEL)
61418 #define free(a) kfree(a)
61419
61420 #define large_malloc(a) vmalloc(a)
fe2de317 61421diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
4c928ab7 61422index e13117c..e9fc938 100644
fe2de317
MT
61423--- a/include/linux/dma-mapping.h
61424+++ b/include/linux/dma-mapping.h
4c928ab7
MT
61425@@ -46,7 +46,7 @@ struct dma_map_ops {
61426 u64 (*get_required_mask)(struct device *dev);
61427 #endif
15a11c5b
MT
61428 int is_phys;
61429-};
61430+} __do_const;
ae4e228f
MT
61431
61432 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
15a11c5b 61433
fe2de317 61434diff --git a/include/linux/efi.h b/include/linux/efi.h
5e856224 61435index 7cce0ea..c2085e4 100644
fe2de317
MT
61436--- a/include/linux/efi.h
61437+++ b/include/linux/efi.h
5e856224 61438@@ -591,7 +591,7 @@ struct efivar_operations {
15a11c5b
MT
61439 efi_get_variable_t *get_variable;
61440 efi_get_next_variable_t *get_next_variable;
61441 efi_set_variable_t *set_variable;
61442-};
61443+} __no_const;
61444
61445 struct efivars {
61446 /*
fe2de317 61447diff --git a/include/linux/elf.h b/include/linux/elf.h
5e856224 61448index 999b4f5..57753b4 100644
fe2de317
MT
61449--- a/include/linux/elf.h
61450+++ b/include/linux/elf.h
5e856224 61451@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
58c5fc13
MT
61452 #define PT_GNU_EH_FRAME 0x6474e550
61453
61454 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61455+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61456+
61457+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61458+
61459+/* Constants for the e_flags field */
61460+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61461+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61462+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61463+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61464+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61465+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61466
df50ba0c
MT
61467 /*
61468 * Extended Numbering
5e856224 61469@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
58c5fc13
MT
61470 #define DT_DEBUG 21
61471 #define DT_TEXTREL 22
61472 #define DT_JMPREL 23
61473+#define DT_FLAGS 30
61474+ #define DF_TEXTREL 0x00000004
61475 #define DT_ENCODING 32
61476 #define OLD_DT_LOOS 0x60000000
61477 #define DT_LOOS 0x6000000d
5e856224 61478@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
58c5fc13
MT
61479 #define PF_W 0x2
61480 #define PF_X 0x1
61481
61482+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61483+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61484+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61485+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61486+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61487+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61488+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61489+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61490+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61491+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61492+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61493+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61494+
61495 typedef struct elf32_phdr{
61496 Elf32_Word p_type;
61497 Elf32_Off p_offset;
5e856224 61498@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
58c5fc13
MT
61499 #define EI_OSABI 7
61500 #define EI_PAD 8
61501
61502+#define EI_PAX 14
61503+
61504 #define ELFMAG0 0x7f /* EI_MAG */
61505 #define ELFMAG1 'E'
61506 #define ELFMAG2 'L'
5e856224 61507@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
58c5fc13
MT
61508 #define elf_note elf32_note
61509 #define elf_addr_t Elf32_Off
df50ba0c 61510 #define Elf_Half Elf32_Half
58c5fc13
MT
61511+#define elf_dyn Elf32_Dyn
61512
61513 #else
61514
5e856224 61515@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
58c5fc13
MT
61516 #define elf_note elf64_note
61517 #define elf_addr_t Elf64_Off
df50ba0c 61518 #define Elf_Half Elf64_Half
58c5fc13
MT
61519+#define elf_dyn Elf64_Dyn
61520
61521 #endif
61522
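/*
 * Illustrative only, not part of the patch: the PF_* pairs added above
 * encode per-binary enable/disable overrides in the PT_PAX_FLAGS program
 * header.  The sketch below shows one plausible way to decode such a
 * pair; the policy actually applied by the PaX ELF parser lives elsewhere
 * in the patch.
 */
static int example_want_pageexec(__u32 p_flags, int default_on)
{
	if (p_flags & PF_NOPAGEEXEC)	/* explicitly disabled for this binary */
		return 0;
	if (p_flags & PF_PAGEEXEC)	/* explicitly enabled for this binary */
		return 1;
	return default_on;		/* otherwise fall back to the kernel default */
}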
fe2de317 61523diff --git a/include/linux/filter.h b/include/linux/filter.h
4c928ab7 61524index 8eeb205..d59bfa2 100644
fe2de317
MT
61525--- a/include/linux/filter.h
61526+++ b/include/linux/filter.h
61527@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
883a9837
MT
61528
61529 struct sk_buff;
61530 struct sock;
61531+struct bpf_jit_work;
61532
61533 struct sk_filter
61534 {
61535@@ -141,6 +142,9 @@ struct sk_filter
61536 unsigned int len; /* Number of filter blocks */
61537 unsigned int (*bpf_func)(const struct sk_buff *skb,
61538 const struct sock_filter *filter);
61539+#ifdef CONFIG_BPF_JIT
61540+ struct bpf_jit_work *work;
61541+#endif
61542 struct rcu_head rcu;
61543 struct sock_filter insns[0];
61544 };
fe2de317
MT
61545diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61546index 84ccf8e..2e9b14c 100644
61547--- a/include/linux/firewire.h
61548+++ b/include/linux/firewire.h
15a11c5b
MT
61549@@ -428,7 +428,7 @@ struct fw_iso_context {
61550 union {
61551 fw_iso_callback_t sc;
61552 fw_iso_mc_callback_t mc;
61553- } callback;
61554+ } __no_const callback;
61555 void *callback_data;
66a7e928 61556 };
15a11c5b 61557
fe2de317 61558diff --git a/include/linux/fs.h b/include/linux/fs.h
5e856224 61559index f4b6e06..d6ba573 100644
fe2de317
MT
61560--- a/include/linux/fs.h
61561+++ b/include/linux/fs.h
5e856224 61562@@ -1628,7 +1628,8 @@ struct file_operations {
fe2de317
MT
61563 int (*setlease)(struct file *, long, struct file_lock **);
61564 long (*fallocate)(struct file *file, int mode, loff_t offset,
61565 loff_t len);
61566-};
61567+} __do_const;
61568+typedef struct file_operations __no_const file_operations_no_const;
61569
61570 struct inode_operations {
61571 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61572diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61573index 003dc0f..3c4ea97 100644
61574--- a/include/linux/fs_struct.h
61575+++ b/include/linux/fs_struct.h
61576@@ -6,7 +6,7 @@
61577 #include <linux/seqlock.h>
61578
61579 struct fs_struct {
61580- int users;
61581+ atomic_t users;
61582 spinlock_t lock;
61583 seqcount_t seq;
61584 int umask;
61585diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
4c928ab7 61586index ce31408..b1ad003 100644
fe2de317
MT
61587--- a/include/linux/fscache-cache.h
61588+++ b/include/linux/fscache-cache.h
15a11c5b
MT
61589@@ -102,7 +102,7 @@ struct fscache_operation {
61590 fscache_operation_release_t release;
8308f9c9
MT
61591 };
61592
61593-extern atomic_t fscache_op_debug_id;
61594+extern atomic_unchecked_t fscache_op_debug_id;
61595 extern void fscache_op_work_func(struct work_struct *work);
61596
61597 extern void fscache_enqueue_operation(struct fscache_operation *);
fe2de317 61598@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
8308f9c9
MT
61599 {
61600 INIT_WORK(&op->work, fscache_op_work_func);
61601 atomic_set(&op->usage, 1);
61602- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61603+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61604 op->processor = processor;
61605 op->release = release;
61606 INIT_LIST_HEAD(&op->pend_link);
fe2de317
MT
61607diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61608index 2a53f10..0187fdf 100644
61609--- a/include/linux/fsnotify.h
61610+++ b/include/linux/fsnotify.h
61611@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
15a11c5b
MT
61612 */
61613 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61614 {
61615- return kstrdup(name, GFP_KERNEL);
61616+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61617 }
ae4e228f
MT
61618
61619 /*
4c928ab7
MT
61620diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61621index 91d0e0a3..035666b 100644
61622--- a/include/linux/fsnotify_backend.h
61623+++ b/include/linux/fsnotify_backend.h
61624@@ -105,6 +105,7 @@ struct fsnotify_ops {
61625 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61626 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61627 };
61628+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61629
61630 /*
61631 * A group is a "thing" that wants to receive notification about filesystem
fe2de317 61632diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
4c928ab7 61633index c3da42d..c70e0df 100644
fe2de317
MT
61634--- a/include/linux/ftrace_event.h
61635+++ b/include/linux/ftrace_event.h
6e9df6a3 61636@@ -97,7 +97,7 @@ struct trace_event_functions {
15a11c5b
MT
61637 trace_print_func raw;
61638 trace_print_func hex;
61639 trace_print_func binary;
61640-};
61641+} __no_const;
61642
61643 struct trace_event {
61644 struct hlist_node node;
4c928ab7 61645@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
66a7e928
MT
61646 extern int trace_add_event_call(struct ftrace_event_call *call);
61647 extern void trace_remove_event_call(struct ftrace_event_call *call);
61648
61649-#define is_signed_type(type) (((type)(-1)) < 0)
61650+#define is_signed_type(type) (((type)(-1)) < (type)1)
61651
61652 int trace_set_clr_event(const char *system, const char *event, int set);
61653
fe2de317 61654diff --git a/include/linux/genhd.h b/include/linux/genhd.h
5e856224 61655index e61d319..0da8505 100644
fe2de317
MT
61656--- a/include/linux/genhd.h
61657+++ b/include/linux/genhd.h
4c928ab7 61658@@ -185,7 +185,7 @@ struct gendisk {
16454cff 61659 struct kobject *slave_dir;
58c5fc13
MT
61660
61661 struct timer_rand_state *random;
58c5fc13
MT
61662- atomic_t sync_io; /* RAID */
61663+ atomic_unchecked_t sync_io; /* RAID */
16454cff 61664 struct disk_events *ev;
58c5fc13
MT
61665 #ifdef CONFIG_BLK_DEV_INTEGRITY
61666 struct blk_integrity *integrity;
fe2de317
MT
61667diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61668new file mode 100644
4c928ab7 61669index 0000000..8a130b6
fe2de317
MT
61670--- /dev/null
61671+++ b/include/linux/gracl.h
4c928ab7 61672@@ -0,0 +1,319 @@
58c5fc13
MT
61673+#ifndef GR_ACL_H
61674+#define GR_ACL_H
61675+
61676+#include <linux/grdefs.h>
61677+#include <linux/resource.h>
61678+#include <linux/capability.h>
61679+#include <linux/dcache.h>
61680+#include <asm/resource.h>
61681+
61682+/* Major status information */
61683+
4c928ab7
MT
61684+#define GR_VERSION "grsecurity 2.9"
61685+#define GRSECURITY_VERSION 0x2900
58c5fc13
MT
61686+
61687+enum {
61688+ GR_SHUTDOWN = 0,
61689+ GR_ENABLE = 1,
61690+ GR_SPROLE = 2,
61691+ GR_RELOAD = 3,
61692+ GR_SEGVMOD = 4,
61693+ GR_STATUS = 5,
61694+ GR_UNSPROLE = 6,
61695+ GR_PASSSET = 7,
61696+ GR_SPROLEPAM = 8,
61697+};
61698+
61699+/* Password setup definitions
61700+ * kernel/grhash.c */
61701+enum {
61702+ GR_PW_LEN = 128,
61703+ GR_SALT_LEN = 16,
61704+ GR_SHA_LEN = 32,
61705+};
61706+
61707+enum {
61708+ GR_SPROLE_LEN = 64,
61709+};
61710+
bc901d79
MT
61711+enum {
61712+ GR_NO_GLOB = 0,
61713+ GR_REG_GLOB,
61714+ GR_CREATE_GLOB
61715+};
61716+
58c5fc13
MT
61717+#define GR_NLIMITS 32
61718+
61719+/* Begin Data Structures */
61720+
61721+struct sprole_pw {
61722+ unsigned char *rolename;
61723+ unsigned char salt[GR_SALT_LEN];
61724+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61725+};
61726+
61727+struct name_entry {
61728+ __u32 key;
61729+ ino_t inode;
61730+ dev_t device;
61731+ char *name;
61732+ __u16 len;
61733+ __u8 deleted;
61734+ struct name_entry *prev;
61735+ struct name_entry *next;
61736+};
61737+
61738+struct inodev_entry {
61739+ struct name_entry *nentry;
61740+ struct inodev_entry *prev;
61741+ struct inodev_entry *next;
61742+};
61743+
61744+struct acl_role_db {
61745+ struct acl_role_label **r_hash;
61746+ __u32 r_size;
61747+};
61748+
61749+struct inodev_db {
61750+ struct inodev_entry **i_hash;
61751+ __u32 i_size;
61752+};
61753+
61754+struct name_db {
61755+ struct name_entry **n_hash;
61756+ __u32 n_size;
61757+};
61758+
61759+struct crash_uid {
61760+ uid_t uid;
61761+ unsigned long expires;
61762+};
61763+
61764+struct gr_hash_struct {
61765+ void **table;
61766+ void **nametable;
61767+ void *first;
61768+ __u32 table_size;
61769+ __u32 used_size;
61770+ int type;
61771+};
61772+
61773+/* Userspace Grsecurity ACL data structures */
61774+
61775+struct acl_subject_label {
61776+ char *filename;
61777+ ino_t inode;
61778+ dev_t device;
61779+ __u32 mode;
61780+ kernel_cap_t cap_mask;
61781+ kernel_cap_t cap_lower;
df50ba0c 61782+ kernel_cap_t cap_invert_audit;
58c5fc13
MT
61783+
61784+ struct rlimit res[GR_NLIMITS];
61785+ __u32 resmask;
61786+
61787+ __u8 user_trans_type;
61788+ __u8 group_trans_type;
61789+ uid_t *user_transitions;
61790+ gid_t *group_transitions;
61791+ __u16 user_trans_num;
61792+ __u16 group_trans_num;
61793+
bc901d79 61794+ __u32 sock_families[2];
58c5fc13
MT
61795+ __u32 ip_proto[8];
61796+ __u32 ip_type;
61797+ struct acl_ip_label **ips;
61798+ __u32 ip_num;
61799+ __u32 inaddr_any_override;
61800+
61801+ __u32 crashes;
61802+ unsigned long expires;
61803+
61804+ struct acl_subject_label *parent_subject;
61805+ struct gr_hash_struct *hash;
61806+ struct acl_subject_label *prev;
61807+ struct acl_subject_label *next;
61808+
61809+ struct acl_object_label **obj_hash;
61810+ __u32 obj_hash_size;
61811+ __u16 pax_flags;
61812+};
61813+
61814+struct role_allowed_ip {
61815+ __u32 addr;
61816+ __u32 netmask;
61817+
61818+ struct role_allowed_ip *prev;
61819+ struct role_allowed_ip *next;
61820+};
61821+
61822+struct role_transition {
61823+ char *rolename;
61824+
61825+ struct role_transition *prev;
61826+ struct role_transition *next;
61827+};
61828+
61829+struct acl_role_label {
61830+ char *rolename;
61831+ uid_t uidgid;
61832+ __u16 roletype;
61833+
61834+ __u16 auth_attempts;
61835+ unsigned long expires;
61836+
61837+ struct acl_subject_label *root_label;
61838+ struct gr_hash_struct *hash;
61839+
61840+ struct acl_role_label *prev;
61841+ struct acl_role_label *next;
61842+
61843+ struct role_transition *transitions;
61844+ struct role_allowed_ip *allowed_ips;
61845+ uid_t *domain_children;
61846+ __u16 domain_child_num;
61847+
4c928ab7
MT
61848+ umode_t umask;
61849+
58c5fc13
MT
61850+ struct acl_subject_label **subj_hash;
61851+ __u32 subj_hash_size;
61852+};
61853+
61854+struct user_acl_role_db {
61855+ struct acl_role_label **r_table;
61856+ __u32 num_pointers; /* Number of allocations to track */
61857+ __u32 num_roles; /* Number of roles */
61858+ __u32 num_domain_children; /* Number of domain children */
61859+ __u32 num_subjects; /* Number of subjects */
61860+ __u32 num_objects; /* Number of objects */
61861+};
61862+
61863+struct acl_object_label {
61864+ char *filename;
61865+ ino_t inode;
61866+ dev_t device;
61867+ __u32 mode;
61868+
61869+ struct acl_subject_label *nested;
61870+ struct acl_object_label *globbed;
61871+
61872+ /* next two structures not used */
61873+
61874+ struct acl_object_label *prev;
61875+ struct acl_object_label *next;
61876+};
61877+
61878+struct acl_ip_label {
61879+ char *iface;
61880+ __u32 addr;
61881+ __u32 netmask;
61882+ __u16 low, high;
61883+ __u8 mode;
61884+ __u32 type;
61885+ __u32 proto[8];
61886+
61887+ /* next two structures not used */
61888+
61889+ struct acl_ip_label *prev;
61890+ struct acl_ip_label *next;
61891+};
61892+
61893+struct gr_arg {
61894+ struct user_acl_role_db role_db;
61895+ unsigned char pw[GR_PW_LEN];
61896+ unsigned char salt[GR_SALT_LEN];
61897+ unsigned char sum[GR_SHA_LEN];
61898+ unsigned char sp_role[GR_SPROLE_LEN];
61899+ struct sprole_pw *sprole_pws;
61900+ dev_t segv_device;
61901+ ino_t segv_inode;
61902+ uid_t segv_uid;
61903+ __u16 num_sprole_pws;
61904+ __u16 mode;
61905+};
61906+
61907+struct gr_arg_wrapper {
61908+ struct gr_arg *arg;
61909+ __u32 version;
61910+ __u32 size;
61911+};
61912+
61913+struct subject_map {
61914+ struct acl_subject_label *user;
61915+ struct acl_subject_label *kernel;
61916+ struct subject_map *prev;
61917+ struct subject_map *next;
61918+};
61919+
61920+struct acl_subj_map_db {
61921+ struct subject_map **s_hash;
61922+ __u32 s_size;
61923+};
61924+
61925+/* End Data Structures Section */
61926+
61927+/* Hash functions generated by empirical testing by Brad Spengler
61928+ Makes good use of the low bits of the inode. Generally 0-1 times
61929+ in loop for successful match. 0-3 for unsuccessful match.
61930+ Shift/add algorithm with modulus of table size and an XOR*/
61931+
61932+static __inline__ unsigned int
61933+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61934+{
ae4e228f 61935+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58c5fc13
MT
61936+}
61937+
61938+ static __inline__ unsigned int
61939+shash(const struct acl_subject_label *userp, const unsigned int sz)
61940+{
61941+ return ((const unsigned long)userp % sz);
61942+}
61943+
61944+static __inline__ unsigned int
61945+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61946+{
61947+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61948+}
61949+
61950+static __inline__ unsigned int
61951+nhash(const char *name, const __u16 len, const unsigned int sz)
61952+{
61953+ return full_name_hash((const unsigned char *)name, len) % sz;
61954+}
61955+
ae4e228f
MT
61956+#define FOR_EACH_ROLE_START(role) \
61957+ role = role_list; \
61958+ while (role) {
58c5fc13 61959+
ae4e228f
MT
61960+#define FOR_EACH_ROLE_END(role) \
61961+ role = role->prev; \
58c5fc13
MT
61962+ }
61963+
61964+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61965+ subj = NULL; \
61966+ iter = 0; \
61967+ while (iter < role->subj_hash_size) { \
61968+ if (subj == NULL) \
61969+ subj = role->subj_hash[iter]; \
61970+ if (subj == NULL) { \
61971+ iter++; \
61972+ continue; \
61973+ }
61974+
61975+#define FOR_EACH_SUBJECT_END(subj,iter) \
61976+ subj = subj->next; \
61977+ if (subj == NULL) \
61978+ iter++; \
61979+ }
61980+
61981+
61982+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61983+ subj = role->hash->first; \
61984+ while (subj != NULL) {
61985+
61986+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61987+ subj = subj->next; \
61988+ }
61989+
61990+#endif
61991+
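/*
 * Illustrative only, not part of the patch: the inline hash helpers above
 * index the ACL hash tables carried in the *_db structures.  This sketch
 * shows one plausible chained-bucket lookup over a subject's object hash
 * using fhash(); the real lookup (including globbed and nested objects)
 * is implemented elsewhere in the patch.
 */
static struct acl_object_label *
example_lookup_object(const struct acl_subject_label *subj,
		      const ino_t ino, const dev_t dev)
{
	unsigned int idx = fhash(ino, dev, subj->obj_hash_size);
	struct acl_object_label *obj;

	for (obj = subj->obj_hash[idx]; obj != NULL; obj = obj->next)
		if (obj->inode == ino && obj->device == dev)
			return obj;

	return NULL;
}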
fe2de317
MT
61992diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61993new file mode 100644
61994index 0000000..323ecf2
61995--- /dev/null
61996+++ b/include/linux/gralloc.h
58c5fc13
MT
61997@@ -0,0 +1,9 @@
61998+#ifndef __GRALLOC_H
61999+#define __GRALLOC_H
62000+
62001+void acl_free_all(void);
62002+int acl_alloc_stack_init(unsigned long size);
62003+void *acl_alloc(unsigned long len);
62004+void *acl_alloc_num(unsigned long num, unsigned long len);
62005+
62006+#endif
fe2de317
MT
62007diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
62008new file mode 100644
62009index 0000000..b30e9bc
62010--- /dev/null
62011+++ b/include/linux/grdefs.h
15a11c5b 62012@@ -0,0 +1,140 @@
58c5fc13
MT
62013+#ifndef GRDEFS_H
62014+#define GRDEFS_H
62015+
62016+/* Begin grsecurity status declarations */
62017+
62018+enum {
62019+ GR_READY = 0x01,
62020+ GR_STATUS_INIT = 0x00 // disabled state
62021+};
62022+
62023+/* Begin ACL declarations */
62024+
62025+/* Role flags */
62026+
62027+enum {
62028+ GR_ROLE_USER = 0x0001,
62029+ GR_ROLE_GROUP = 0x0002,
62030+ GR_ROLE_DEFAULT = 0x0004,
62031+ GR_ROLE_SPECIAL = 0x0008,
62032+ GR_ROLE_AUTH = 0x0010,
62033+ GR_ROLE_NOPW = 0x0020,
62034+ GR_ROLE_GOD = 0x0040,
62035+ GR_ROLE_LEARN = 0x0080,
62036+ GR_ROLE_TPE = 0x0100,
62037+ GR_ROLE_DOMAIN = 0x0200,
16454cff
MT
62038+ GR_ROLE_PAM = 0x0400,
62039+ GR_ROLE_PERSIST = 0x0800
58c5fc13
MT
62040+};
62041+
62042+/* ACL Subject and Object mode flags */
62043+enum {
62044+ GR_DELETED = 0x80000000
62045+};
62046+
62047+/* ACL Object-only mode flags */
62048+enum {
62049+ GR_READ = 0x00000001,
62050+ GR_APPEND = 0x00000002,
62051+ GR_WRITE = 0x00000004,
62052+ GR_EXEC = 0x00000008,
62053+ GR_FIND = 0x00000010,
62054+ GR_INHERIT = 0x00000020,
62055+ GR_SETID = 0x00000040,
62056+ GR_CREATE = 0x00000080,
62057+ GR_DELETE = 0x00000100,
62058+ GR_LINK = 0x00000200,
62059+ GR_AUDIT_READ = 0x00000400,
62060+ GR_AUDIT_APPEND = 0x00000800,
62061+ GR_AUDIT_WRITE = 0x00001000,
62062+ GR_AUDIT_EXEC = 0x00002000,
62063+ GR_AUDIT_FIND = 0x00004000,
62064+ GR_AUDIT_INHERIT= 0x00008000,
62065+ GR_AUDIT_SETID = 0x00010000,
62066+ GR_AUDIT_CREATE = 0x00020000,
62067+ GR_AUDIT_DELETE = 0x00040000,
62068+ GR_AUDIT_LINK = 0x00080000,
62069+ GR_PTRACERD = 0x00100000,
62070+ GR_NOPTRACE = 0x00200000,
62071+ GR_SUPPRESS = 0x00400000,
16454cff
MT
62072+ GR_NOLEARN = 0x00800000,
62073+ GR_INIT_TRANSFER= 0x01000000
58c5fc13
MT
62074+};
62075+
62076+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
62077+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
62078+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
62079+
62080+/* ACL subject-only mode flags */
62081+enum {
62082+ GR_KILL = 0x00000001,
62083+ GR_VIEW = 0x00000002,
62084+ GR_PROTECTED = 0x00000004,
62085+ GR_LEARN = 0x00000008,
62086+ GR_OVERRIDE = 0x00000010,
62087+ /* just a placeholder, this mode is only used in userspace */
62088+ GR_DUMMY = 0x00000020,
62089+ GR_PROTSHM = 0x00000040,
62090+ GR_KILLPROC = 0x00000080,
62091+ GR_KILLIPPROC = 0x00000100,
62092+ /* just a placeholder, this mode is only used in userspace */
62093+ GR_NOTROJAN = 0x00000200,
62094+ GR_PROTPROCFD = 0x00000400,
62095+ GR_PROCACCT = 0x00000800,
62096+ GR_RELAXPTRACE = 0x00001000,
62097+ GR_NESTED = 0x00002000,
62098+ GR_INHERITLEARN = 0x00004000,
62099+ GR_PROCFIND = 0x00008000,
62100+ GR_POVERRIDE = 0x00010000,
62101+ GR_KERNELAUTH = 0x00020000,
15a11c5b
MT
62102+ GR_ATSECURE = 0x00040000,
62103+ GR_SHMEXEC = 0x00080000
58c5fc13
MT
62104+};
62105+
62106+enum {
62107+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
62108+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
62109+ GR_PAX_ENABLE_MPROTECT = 0x0004,
62110+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
62111+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
62112+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
62113+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
62114+ GR_PAX_DISABLE_MPROTECT = 0x0400,
62115+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
62116+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
62117+};
62118+
62119+enum {
62120+ GR_ID_USER = 0x01,
62121+ GR_ID_GROUP = 0x02,
62122+};
62123+
62124+enum {
62125+ GR_ID_ALLOW = 0x01,
62126+ GR_ID_DENY = 0x02,
62127+};
62128+
62129+#define GR_CRASH_RES 31
62130+#define GR_UIDTABLE_MAX 500
62131+
62132+/* begin resource learning section */
62133+enum {
62134+ GR_RLIM_CPU_BUMP = 60,
62135+ GR_RLIM_FSIZE_BUMP = 50000,
62136+ GR_RLIM_DATA_BUMP = 10000,
62137+ GR_RLIM_STACK_BUMP = 1000,
62138+ GR_RLIM_CORE_BUMP = 10000,
62139+ GR_RLIM_RSS_BUMP = 500000,
62140+ GR_RLIM_NPROC_BUMP = 1,
62141+ GR_RLIM_NOFILE_BUMP = 5,
62142+ GR_RLIM_MEMLOCK_BUMP = 50000,
62143+ GR_RLIM_AS_BUMP = 500000,
62144+ GR_RLIM_LOCKS_BUMP = 2,
62145+ GR_RLIM_SIGPENDING_BUMP = 5,
62146+ GR_RLIM_MSGQUEUE_BUMP = 10000,
62147+ GR_RLIM_NICE_BUMP = 1,
62148+ GR_RLIM_RTPRIO_BUMP = 1,
62149+ GR_RLIM_RTTIME_BUMP = 1000000
62150+};
62151+
62152+#endif
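
Note the layout of the flag enums above: each GR_AUDIT_* bit is the matching access bit (GR_READ through GR_LINK) shifted left by ten positions, which is what lets a requested access mode be translated into its audit counterpart with a single shift. The to_gr_audit() helper declared in grinternal.h below is assumed to work this way; its body is not part of this hunk, so the following stand-alone sketch only illustrates the relationship implied by the values shown:

    #include <stdio.h>

    /* values copied from the GR_* enum in the hunk above; the audit bits
     * GR_AUDIT_READ (0x400) .. GR_AUDIT_LINK (0x80000) are the same bits
     * shifted left by 10 */
    #define GR_READ 0x00000001u
    #define GR_LINK 0x00000200u

    /* assumed shape of to_gr_audit(): keep the access bits GR_READ..GR_LINK
     * (the low ten bits) and shift them up into the audit range */
    static unsigned int to_gr_audit(unsigned int reqmode)
    {
        return (reqmode & 0x3ffu) << 10;
    }

    int main(void)
    {
        /* prints 0x00080400, i.e. GR_AUDIT_READ | GR_AUDIT_LINK */
        printf("0x%08x\n", to_gr_audit(GR_READ | GR_LINK));
        return 0;
    }
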
fe2de317
MT
62153diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
62154new file mode 100644
4c928ab7 62155index 0000000..da390f1
fe2de317
MT
62156--- /dev/null
62157+++ b/include/linux/grinternal.h
4c928ab7 62158@@ -0,0 +1,221 @@
58c5fc13
MT
62159+#ifndef __GRINTERNAL_H
62160+#define __GRINTERNAL_H
62161+
62162+#ifdef CONFIG_GRKERNSEC
62163+
62164+#include <linux/fs.h>
62165+#include <linux/mnt_namespace.h>
62166+#include <linux/nsproxy.h>
62167+#include <linux/gracl.h>
62168+#include <linux/grdefs.h>
62169+#include <linux/grmsg.h>
62170+
62171+void gr_add_learn_entry(const char *fmt, ...)
62172+ __attribute__ ((format (printf, 1, 2)));
62173+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62174+ const struct vfsmount *mnt);
62175+__u32 gr_check_create(const struct dentry *new_dentry,
62176+ const struct dentry *parent,
62177+ const struct vfsmount *mnt, const __u32 mode);
62178+int gr_check_protected_task(const struct task_struct *task);
62179+__u32 to_gr_audit(const __u32 reqmode);
62180+int gr_set_acls(const int type);
16454cff 62181+int gr_apply_subject_to_task(struct task_struct *task);
58c5fc13
MT
62182+int gr_acl_is_enabled(void);
62183+char gr_roletype_to_char(void);
62184+
62185+void gr_handle_alertkill(struct task_struct *task);
62186+char *gr_to_filename(const struct dentry *dentry,
62187+ const struct vfsmount *mnt);
62188+char *gr_to_filename1(const struct dentry *dentry,
62189+ const struct vfsmount *mnt);
62190+char *gr_to_filename2(const struct dentry *dentry,
62191+ const struct vfsmount *mnt);
62192+char *gr_to_filename3(const struct dentry *dentry,
62193+ const struct vfsmount *mnt);
62194+
4c928ab7 62195+extern int grsec_enable_ptrace_readexec;
58c5fc13
MT
62196+extern int grsec_enable_harden_ptrace;
62197+extern int grsec_enable_link;
62198+extern int grsec_enable_fifo;
62199+extern int grsec_enable_execve;
62200+extern int grsec_enable_shm;
62201+extern int grsec_enable_execlog;
62202+extern int grsec_enable_signal;
ae4e228f 62203+extern int grsec_enable_audit_ptrace;
58c5fc13
MT
62204+extern int grsec_enable_forkfail;
62205+extern int grsec_enable_time;
ae4e228f 62206+extern int grsec_enable_rofs;
58c5fc13 62207+extern int grsec_enable_chroot_shmat;
58c5fc13
MT
62208+extern int grsec_enable_chroot_mount;
62209+extern int grsec_enable_chroot_double;
62210+extern int grsec_enable_chroot_pivot;
62211+extern int grsec_enable_chroot_chdir;
62212+extern int grsec_enable_chroot_chmod;
62213+extern int grsec_enable_chroot_mknod;
62214+extern int grsec_enable_chroot_fchdir;
62215+extern int grsec_enable_chroot_nice;
62216+extern int grsec_enable_chroot_execlog;
62217+extern int grsec_enable_chroot_caps;
62218+extern int grsec_enable_chroot_sysctl;
62219+extern int grsec_enable_chroot_unix;
62220+extern int grsec_enable_tpe;
62221+extern int grsec_tpe_gid;
62222+extern int grsec_enable_tpe_all;
57199397 62223+extern int grsec_enable_tpe_invert;
58c5fc13
MT
62224+extern int grsec_enable_socket_all;
62225+extern int grsec_socket_all_gid;
62226+extern int grsec_enable_socket_client;
62227+extern int grsec_socket_client_gid;
62228+extern int grsec_enable_socket_server;
62229+extern int grsec_socket_server_gid;
62230+extern int grsec_audit_gid;
62231+extern int grsec_enable_group;
62232+extern int grsec_enable_audit_textrel;
6892158b 62233+extern int grsec_enable_log_rwxmaps;
58c5fc13
MT
62234+extern int grsec_enable_mount;
62235+extern int grsec_enable_chdir;
62236+extern int grsec_resource_logging;
ae4e228f
MT
62237+extern int grsec_enable_blackhole;
62238+extern int grsec_lastack_retries;
15a11c5b 62239+extern int grsec_enable_brute;
58c5fc13
MT
62240+extern int grsec_lock;
62241+
62242+extern spinlock_t grsec_alert_lock;
62243+extern unsigned long grsec_alert_wtime;
62244+extern unsigned long grsec_alert_fyet;
62245+
62246+extern spinlock_t grsec_audit_lock;
62247+
62248+extern rwlock_t grsec_exec_file_lock;
62249+
6892158b
MT
62250+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62251+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62252+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 62253+
6892158b
MT
62254+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62255+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62256+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 62257+
6892158b
MT
62258+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62259+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
62260+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 62261+
6892158b
MT
62262+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62263+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62264+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 62265+
6892158b 62266+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58c5fc13 62267+
6892158b 62268+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58c5fc13 62269+
6892158b
MT
62270+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62271+ (task)->pid, (cred)->uid, \
62272+ (cred)->euid, (cred)->gid, (cred)->egid, \
58c5fc13 62273+ gr_parent_task_fullpath(task), \
6892158b
MT
62274+ (task)->real_parent->comm, (task)->real_parent->pid, \
62275+ (pcred)->uid, (pcred)->euid, \
62276+ (pcred)->gid, (pcred)->egid
58c5fc13
MT
62277+
62278+#define GR_CHROOT_CAPS {{ \
62279+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62280+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62281+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62282+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62283+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
6e9df6a3
MT
62284+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62285+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58c5fc13
MT
62286+
62287+#define security_learn(normal_msg,args...) \
62288+({ \
62289+ read_lock(&grsec_exec_file_lock); \
62290+ gr_add_learn_entry(normal_msg "\n", ## args); \
62291+ read_unlock(&grsec_exec_file_lock); \
62292+})
62293+
62294+enum {
62295+ GR_DO_AUDIT,
62296+ GR_DONT_AUDIT,
16454cff 62297+ /* used for non-audit messages that we shouldn't kill the task on */
58c5fc13
MT
62298+ GR_DONT_AUDIT_GOOD
62299+};
62300+
62301+enum {
62302+ GR_TTYSNIFF,
62303+ GR_RBAC,
62304+ GR_RBAC_STR,
62305+ GR_STR_RBAC,
62306+ GR_RBAC_MODE2,
62307+ GR_RBAC_MODE3,
62308+ GR_FILENAME,
62309+ GR_SYSCTL_HIDDEN,
62310+ GR_NOARGS,
62311+ GR_ONE_INT,
62312+ GR_ONE_INT_TWO_STR,
62313+ GR_ONE_STR,
62314+ GR_STR_INT,
bc901d79 62315+ GR_TWO_STR_INT,
58c5fc13 62316+ GR_TWO_INT,
71d190be 62317+ GR_TWO_U64,
58c5fc13
MT
62318+ GR_THREE_INT,
62319+ GR_FIVE_INT_TWO_STR,
62320+ GR_TWO_STR,
62321+ GR_THREE_STR,
62322+ GR_FOUR_STR,
62323+ GR_STR_FILENAME,
62324+ GR_FILENAME_STR,
62325+ GR_FILENAME_TWO_INT,
62326+ GR_FILENAME_TWO_INT_STR,
62327+ GR_TEXTREL,
62328+ GR_PTRACE,
62329+ GR_RESOURCE,
62330+ GR_CAP,
62331+ GR_SIG,
62332+ GR_SIG2,
62333+ GR_CRASH1,
62334+ GR_CRASH2,
6892158b
MT
62335+ GR_PSACCT,
62336+ GR_RWXMAP
58c5fc13
MT
62337+};
62338+
62339+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62340+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62341+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62342+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62343+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62344+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62345+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62346+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62347+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62348+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62349+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62350+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62351+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62352+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
71d190be 62353+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58c5fc13
MT
62354+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62355+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62356+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
bc901d79 62357+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58c5fc13
MT
62358+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62359+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62360+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62361+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62362+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62363+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62364+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62365+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62366+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62367+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62368+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62369+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62370+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62371+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62372+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
6892158b 62373+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58c5fc13
MT
62374+
62375+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62376+
62377+#endif
62378+
62379+#endif
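
Every gr_log_* wrapper defined above expands to a gr_log_varargs() call whose third argument is one of the argument-type tags from the enum, so one variadic routine can recover the right arguments for any message shape. The kernel implementation of gr_log_varargs() is not part of this hunk; the stand-alone sketch below only illustrates that dispatch convention, with a reduced tag set and made-up message text:

    #include <stdarg.h>
    #include <stdio.h>

    /* reduced tag set standing in for the argtypes enum above */
    enum { GR_NOARGS, GR_ONE_INT, GR_ONE_STR, GR_STR_INT };

    /* illustration only, not the kernel code: the tag tells the logger how
     * to pull the remaining arguments off the va_list */
    static void log_varargs(int audit, const char *msg, int argtypes, ...)
    {
        va_list ap;

        (void)audit;    /* selects alert vs. audit severity in the real code */
        va_start(ap, argtypes);
        fputs("grsec: ", stderr);
        switch (argtypes) {
        case GR_ONE_INT:
            fprintf(stderr, msg, va_arg(ap, int));
            break;
        case GR_ONE_STR:
            fprintf(stderr, msg, va_arg(ap, const char *));
            break;
        case GR_STR_INT: {
            const char *str = va_arg(ap, const char *);
            int num = va_arg(ap, int);
            fprintf(stderr, msg, str, num);
            break;
        }
        default:
            fputs(msg, stderr);
            break;
        }
        fputc('\n', stderr);
        va_end(ap);
    }

    int main(void)
    {
        /* shaped like a call made through the gr_log_str_int() wrapper */
        log_varargs(0, "denied resource overstep for %.16s against limit %d",
                    GR_STR_INT, "RLIMIT_NPROC", 64);
        return 0;
    }
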
fe2de317
MT
62380diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62381new file mode 100644
4c928ab7 62382index 0000000..ae576a1
fe2de317
MT
62383--- /dev/null
62384+++ b/include/linux/grmsg.h
4c928ab7 62385@@ -0,0 +1,109 @@
58c5fc13 62386+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
ae4e228f 62387+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58c5fc13
MT
62388+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62389+#define GR_STOPMOD_MSG "denied modification of module state by "
ae4e228f
MT
62390+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62391+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58c5fc13
MT
62392+#define GR_IOPERM_MSG "denied use of ioperm() by "
62393+#define GR_IOPL_MSG "denied use of iopl() by "
62394+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62395+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62396+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
71d190be 62397+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58c5fc13 62398+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
ae4e228f
MT
62399+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62400+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58c5fc13
MT
62401+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62402+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62403+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62404+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62405+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62406+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62407+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
ae4e228f 62408+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58c5fc13
MT
62409+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62410+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62411+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62412+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62413+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62414+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62415+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62416+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
ae4e228f 62417+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58c5fc13 62418+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58c5fc13 62419+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
4c928ab7 62420+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
58c5fc13
MT
62421+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62422+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62423+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62424+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62425+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62426+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62427+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62428+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58c5fc13
MT
62429+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62430+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62431+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62432+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
bc901d79 62433+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58c5fc13
MT
62434+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62435+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62436+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
4c928ab7 62437+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58c5fc13
MT
62438+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62439+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62440+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62441+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62442+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62443+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62444+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62445+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62446+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62447+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62448+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62449+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62450+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62451+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62452+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62453+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62454+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58c5fc13
MT
62455+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62456+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
6892158b 62457+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58c5fc13
MT
62458+#define GR_NICE_CHROOT_MSG "denied priority change by "
62459+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62460+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62461+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62462+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62463+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62464+#define GR_TIME_MSG "time set by "
62465+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62466+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62467+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62468+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
bc901d79 62469+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58c5fc13
MT
62470+#define GR_BIND_MSG "denied bind() by "
62471+#define GR_CONNECT_MSG "denied connect() by "
ae4e228f
MT
62472+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62473+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62474+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58c5fc13
MT
62475+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62476+#define GR_CAP_ACL_MSG "use of %s denied for "
15a11c5b 62477+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
df50ba0c 62478+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58c5fc13
MT
62479+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62480+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62481+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62482+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62483+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62484+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62485+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62486+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
6892158b
MT
62487+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62488+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58c5fc13 62489+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
ae4e228f
MT
62490+#define GR_VM86_MSG "denied use of vm86 by "
62491+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
4c928ab7 62492+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
16454cff 62493+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
4c928ab7 62494+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
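
All of these macros are printf-style format fragments, and most deliberately end in "by " or "in " because the logging layer appends a description of the acting task (the DEFAULTSECMSG/DEFAULTSECARGS pair in grinternal.h above). A minimal stand-alone illustration of how such a fragment gets completed through compile-time string concatenation; the program name and PID are dummy stand-ins for the task details the real logger supplies:

    #include <stdio.h>

    #define GR_UNISIGLOG_MSG "%.32s occurred at %p in "   /* copied from the hunk above */

    int main(void)
    {
        int object;

        /* adjacent string literals concatenate, so the trailing "in " is
         * completed by appending more format text and matching arguments */
        printf(GR_UNISIGLOG_MSG "%s[%d]\n",
               "SIGSEGV", (void *)&object, "someprog", 1234);
        return 0;
    }
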
fe2de317
MT
62495diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62496new file mode 100644
5e856224 62497index 0000000..acd05db
fe2de317
MT
62498--- /dev/null
62499+++ b/include/linux/grsecurity.h
5e856224 62500@@ -0,0 +1,232 @@
58c5fc13
MT
62501+#ifndef GR_SECURITY_H
62502+#define GR_SECURITY_H
62503+#include <linux/fs.h>
62504+#include <linux/fs_struct.h>
62505+#include <linux/binfmts.h>
62506+#include <linux/gracl.h>
62507+
62508+/* notify of brain-dead configs */
15a11c5b
MT
62509+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62510+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62511+#endif
58c5fc13
MT
62512+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62513+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62514+#endif
58c5fc13
MT
62515+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62516+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62517+#endif
62518+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62519+#error "CONFIG_PAX enabled, but no PaX options are enabled."
62520+#endif
62521+
15a11c5b
MT
62522+#include <linux/compat.h>
62523+
62524+struct user_arg_ptr {
62525+#ifdef CONFIG_COMPAT
62526+ bool is_compat;
62527+#endif
62528+ union {
62529+ const char __user *const __user *native;
62530+#ifdef CONFIG_COMPAT
62531+ compat_uptr_t __user *compat;
62532+#endif
62533+ } ptr;
62534+};
62535+
71d190be 62536+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58c5fc13 62537+void gr_handle_brute_check(void);
71d190be
MT
62538+void gr_handle_kernel_exploit(void);
62539+int gr_process_user_ban(void);
58c5fc13
MT
62540+
62541+char gr_roletype_to_char(void);
62542+
bc901d79
MT
62543+int gr_acl_enable_at_secure(void);
62544+
58c5fc13
MT
62545+int gr_check_user_change(int real, int effective, int fs);
62546+int gr_check_group_change(int real, int effective, int fs);
62547+
62548+void gr_del_task_from_ip_table(struct task_struct *p);
62549+
62550+int gr_pid_is_chrooted(struct task_struct *p);
57199397 62551+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
62552+int gr_handle_chroot_nice(void);
62553+int gr_handle_chroot_sysctl(const int op);
62554+int gr_handle_chroot_setpriority(struct task_struct *p,
62555+ const int niceval);
62556+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62557+int gr_handle_chroot_chroot(const struct dentry *dentry,
62558+ const struct vfsmount *mnt);
58c5fc13
MT
62559+void gr_handle_chroot_chdir(struct path *path);
62560+int gr_handle_chroot_chmod(const struct dentry *dentry,
62561+ const struct vfsmount *mnt, const int mode);
62562+int gr_handle_chroot_mknod(const struct dentry *dentry,
62563+ const struct vfsmount *mnt, const int mode);
62564+int gr_handle_chroot_mount(const struct dentry *dentry,
62565+ const struct vfsmount *mnt,
62566+ const char *dev_name);
62567+int gr_handle_chroot_pivot(void);
15a11c5b 62568+int gr_handle_chroot_unix(const pid_t pid);
58c5fc13
MT
62569+
62570+int gr_handle_rawio(const struct inode *inode);
58c5fc13
MT
62571+
62572+void gr_handle_ioperm(void);
62573+void gr_handle_iopl(void);
62574+
4c928ab7
MT
62575+umode_t gr_acl_umask(void);
62576+
58c5fc13
MT
62577+int gr_tpe_allow(const struct file *file);
62578+
df50ba0c
MT
62579+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62580+void gr_clear_chroot_entries(struct task_struct *task);
58c5fc13
MT
62581+
62582+void gr_log_forkfail(const int retval);
62583+void gr_log_timechange(void);
62584+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62585+void gr_log_chdir(const struct dentry *dentry,
62586+ const struct vfsmount *mnt);
62587+void gr_log_chroot_exec(const struct dentry *dentry,
62588+ const struct vfsmount *mnt);
15a11c5b 62589+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58c5fc13
MT
62590+void gr_log_remount(const char *devname, const int retval);
62591+void gr_log_unmount(const char *devname, const int retval);
62592+void gr_log_mount(const char *from, const char *to, const int retval);
62593+void gr_log_textrel(struct vm_area_struct *vma);
6892158b
MT
62594+void gr_log_rwxmmap(struct file *file);
62595+void gr_log_rwxmprotect(struct file *file);
58c5fc13
MT
62596+
62597+int gr_handle_follow_link(const struct inode *parent,
62598+ const struct inode *inode,
62599+ const struct dentry *dentry,
62600+ const struct vfsmount *mnt);
62601+int gr_handle_fifo(const struct dentry *dentry,
62602+ const struct vfsmount *mnt,
62603+ const struct dentry *dir, const int flag,
62604+ const int acc_mode);
62605+int gr_handle_hardlink(const struct dentry *dentry,
62606+ const struct vfsmount *mnt,
62607+ struct inode *inode,
62608+ const int mode, const char *to);
62609+
62610+int gr_is_capable(const int cap);
62611+int gr_is_capable_nolog(const int cap);
5e856224
MT
62612+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62613+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62614+
58c5fc13
MT
62615+void gr_learn_resource(const struct task_struct *task, const int limit,
62616+ const unsigned long wanted, const int gt);
62617+void gr_copy_label(struct task_struct *tsk);
62618+void gr_handle_crash(struct task_struct *task, const int sig);
62619+int gr_handle_signal(const struct task_struct *p, const int sig);
62620+int gr_check_crash_uid(const uid_t uid);
62621+int gr_check_protected_task(const struct task_struct *task);
57199397 62622+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
62623+int gr_acl_handle_mmap(const struct file *file,
62624+ const unsigned long prot);
62625+int gr_acl_handle_mprotect(const struct file *file,
62626+ const unsigned long prot);
62627+int gr_check_hidden_task(const struct task_struct *tsk);
62628+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62629+ const struct vfsmount *mnt);
62630+__u32 gr_acl_handle_utime(const struct dentry *dentry,
62631+ const struct vfsmount *mnt);
62632+__u32 gr_acl_handle_access(const struct dentry *dentry,
62633+ const struct vfsmount *mnt, const int fmode);
58c5fc13 62634+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
4c928ab7 62635+ const struct vfsmount *mnt, umode_t *mode);
58c5fc13
MT
62636+__u32 gr_acl_handle_chown(const struct dentry *dentry,
62637+ const struct vfsmount *mnt);
bc901d79
MT
62638+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62639+ const struct vfsmount *mnt);
58c5fc13
MT
62640+int gr_handle_ptrace(struct task_struct *task, const long request);
62641+int gr_handle_proc_ptrace(struct task_struct *task);
62642+__u32 gr_acl_handle_execve(const struct dentry *dentry,
62643+ const struct vfsmount *mnt);
62644+int gr_check_crash_exec(const struct file *filp);
62645+int gr_acl_is_enabled(void);
62646+void gr_set_kernel_label(struct task_struct *task);
62647+void gr_set_role_label(struct task_struct *task, const uid_t uid,
62648+ const gid_t gid);
62649+int gr_set_proc_label(const struct dentry *dentry,
62650+ const struct vfsmount *mnt,
4c928ab7 62651+ const int unsafe_flags);
58c5fc13
MT
62652+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62653+ const struct vfsmount *mnt);
62654+__u32 gr_acl_handle_open(const struct dentry *dentry,
6e9df6a3 62655+ const struct vfsmount *mnt, int acc_mode);
58c5fc13
MT
62656+__u32 gr_acl_handle_creat(const struct dentry *dentry,
62657+ const struct dentry *p_dentry,
6e9df6a3
MT
62658+ const struct vfsmount *p_mnt,
62659+ int open_flags, int acc_mode, const int imode);
58c5fc13
MT
62660+void gr_handle_create(const struct dentry *dentry,
62661+ const struct vfsmount *mnt);
6e9df6a3
MT
62662+void gr_handle_proc_create(const struct dentry *dentry,
62663+ const struct inode *inode);
58c5fc13
MT
62664+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62665+ const struct dentry *parent_dentry,
62666+ const struct vfsmount *parent_mnt,
62667+ const int mode);
62668+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62669+ const struct dentry *parent_dentry,
62670+ const struct vfsmount *parent_mnt);
62671+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62672+ const struct vfsmount *mnt);
62673+void gr_handle_delete(const ino_t ino, const dev_t dev);
62674+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62675+ const struct vfsmount *mnt);
62676+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62677+ const struct dentry *parent_dentry,
62678+ const struct vfsmount *parent_mnt,
62679+ const char *from);
62680+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62681+ const struct dentry *parent_dentry,
62682+ const struct vfsmount *parent_mnt,
62683+ const struct dentry *old_dentry,
62684+ const struct vfsmount *old_mnt, const char *to);
62685+int gr_acl_handle_rename(struct dentry *new_dentry,
62686+ struct dentry *parent_dentry,
62687+ const struct vfsmount *parent_mnt,
62688+ struct dentry *old_dentry,
62689+ struct inode *old_parent_inode,
62690+ struct vfsmount *old_mnt, const char *newname);
62691+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62692+ struct dentry *old_dentry,
62693+ struct dentry *new_dentry,
62694+ struct vfsmount *mnt, const __u8 replace);
62695+__u32 gr_check_link(const struct dentry *new_dentry,
62696+ const struct dentry *parent_dentry,
62697+ const struct vfsmount *parent_mnt,
62698+ const struct dentry *old_dentry,
62699+ const struct vfsmount *old_mnt);
62700+int gr_acl_handle_filldir(const struct file *file, const char *name,
62701+ const unsigned int namelen, const ino_t ino);
62702+
62703+__u32 gr_acl_handle_unix(const struct dentry *dentry,
62704+ const struct vfsmount *mnt);
62705+void gr_acl_handle_exit(void);
62706+void gr_acl_handle_psacct(struct task_struct *task, const long code);
62707+int gr_acl_handle_procpidmem(const struct task_struct *task);
ae4e228f
MT
62708+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62709+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62710+void gr_audit_ptrace(struct task_struct *task);
16454cff 62711+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58c5fc13 62712+
4c928ab7
MT
62713+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62714+
58c5fc13 62715+#ifdef CONFIG_GRKERNSEC
6892158b 62716+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
ae4e228f 62717+void gr_handle_vm86(void);
71d190be 62718+void gr_handle_mem_readwrite(u64 from, u64 to);
58c5fc13 62719+
4c928ab7
MT
62720+void gr_log_badprocpid(const char *entry);
62721+
58c5fc13 62722+extern int grsec_enable_dmesg;
df50ba0c 62723+extern int grsec_disable_privio;
15a11c5b
MT
62724+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62725+extern int grsec_enable_chroot_findtask;
62726+#endif
4c928ab7
MT
62727+#ifdef CONFIG_GRKERNSEC_SETXID
62728+extern int grsec_enable_setxid;
62729+#endif
58c5fc13
MT
62730+#endif
62731+
62732+#endif
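
The "notify of brain-dead configs" block at the top of this header is a compile-time sanity check: contradictory Kconfig combinations are rejected with #error as soon as any translation unit includes the header. The same pattern, reduced to a stand-alone sketch with hypothetical CONFIG_FOO_* names:

    /* build with:  cc -DCONFIG_FOO_USER demo.c                            (accepted)
     *              cc -DCONFIG_FOO_USER -DCONFIG_FOO_USERGROUP demo.c     (rejected) */
    #if defined(CONFIG_FOO_USER) && defined(CONFIG_FOO_USERGROUP)
    #error "CONFIG_FOO_USER and CONFIG_FOO_USERGROUP cannot both be enabled."
    #endif

    int main(void)
    {
        return 0;
    }
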
fe2de317
MT
62733diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62734new file mode 100644
62735index 0000000..e7ffaaf
62736--- /dev/null
62737+++ b/include/linux/grsock.h
ae4e228f
MT
62738@@ -0,0 +1,19 @@
62739+#ifndef __GRSOCK_H
62740+#define __GRSOCK_H
62741+
62742+extern void gr_attach_curr_ip(const struct sock *sk);
62743+extern int gr_handle_sock_all(const int family, const int type,
62744+ const int protocol);
62745+extern int gr_handle_sock_server(const struct sockaddr *sck);
df50ba0c 62746+extern int gr_handle_sock_server_other(const struct sock *sck);
ae4e228f
MT
62747+extern int gr_handle_sock_client(const struct sockaddr *sck);
62748+extern int gr_search_connect(struct socket * sock,
62749+ struct sockaddr_in * addr);
62750+extern int gr_search_bind(struct socket * sock,
62751+ struct sockaddr_in * addr);
62752+extern int gr_search_listen(struct socket * sock);
62753+extern int gr_search_accept(struct socket * sock);
62754+extern int gr_search_socket(const int domain, const int type,
62755+ const int protocol);
62756+
62757+#endif
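
These declarations mirror the socket(2)/bind(2)/connect(2) entry points and are meant to be consulted before the corresponding operation proceeds; the patch wires them into the networking core elsewhere. A user-space sketch of the call shape only, assuming the non-zero-means-allowed convention these declarations suggest; the hook body and its deny rule are invented for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* stand-in for the kernel hook; the real decision comes from the loaded
     * RBAC policy, not a hard-coded rule like this one */
    static int gr_search_socket(int domain, int type, int protocol)
    {
        (void)domain;
        (void)protocol;
        return type != SOCK_RAW;          /* non-zero: allowed */
    }

    /* how a socket-creation path is expected to consult the hook */
    static int checked_socket(int domain, int type, int protocol)
    {
        if (!gr_search_socket(domain, type, protocol))
            return -EACCES;
        printf("socket(%d, %d, %d) allowed\n", domain, type, protocol);
        return 0;
    }

    int main(void)
    {
        checked_socket(AF_INET, SOCK_STREAM, 0);   /* allowed */
        checked_socket(AF_INET, SOCK_RAW, 255);    /* refused with -EACCES */
        return 0;
    }
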
fe2de317 62758diff --git a/include/linux/hid.h b/include/linux/hid.h
5e856224 62759index 3a95da6..51986f1 100644
fe2de317
MT
62760--- a/include/linux/hid.h
62761+++ b/include/linux/hid.h
5e856224 62762@@ -696,7 +696,7 @@ struct hid_ll_driver {
15a11c5b
MT
62763 unsigned int code, int value);
62764
62765 int (*parse)(struct hid_device *hdev);
62766-};
62767+} __no_const;
62768
62769 #define PM_HINT_FULLON 1<<5
62770 #define PM_HINT_NORMAL 1<<1
fe2de317
MT
62771diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62772index 3a93f73..b19d0b3 100644
62773--- a/include/linux/highmem.h
62774+++ b/include/linux/highmem.h
62775@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
58c5fc13
MT
62776 kunmap_atomic(kaddr, KM_USER0);
62777 }
62778
62779+static inline void sanitize_highpage(struct page *page)
62780+{
62781+ void *kaddr;
62782+ unsigned long flags;
62783+
62784+ local_irq_save(flags);
62785+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
62786+ clear_page(kaddr);
62787+ kunmap_atomic(kaddr, KM_CLEARPAGE);
62788+ local_irq_restore(flags);
62789+}
62790+
62791 static inline void zero_user_segments(struct page *page,
62792 unsigned start1, unsigned end1,
62793 unsigned start2, unsigned end2)
fe2de317 62794diff --git a/include/linux/i2c.h b/include/linux/i2c.h
5e856224 62795index 8e25a91..551b161 100644
fe2de317
MT
62796--- a/include/linux/i2c.h
62797+++ b/include/linux/i2c.h
4c928ab7 62798@@ -364,6 +364,7 @@ struct i2c_algorithm {
15a11c5b
MT
62799 /* To determine what the adapter supports */
62800 u32 (*functionality) (struct i2c_adapter *);
62801 };
62802+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62803
62804 /*
62805 * i2c_adapter is the structure used to identify a physical i2c bus along
fe2de317
MT
62806diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62807index a6deef4..c56a7f2 100644
62808--- a/include/linux/i2o.h
62809+++ b/include/linux/i2o.h
8308f9c9
MT
62810@@ -564,7 +564,7 @@ struct i2o_controller {
62811 struct i2o_device *exec; /* Executive */
62812 #if BITS_PER_LONG == 64
62813 spinlock_t context_list_lock; /* lock for context_list */
62814- atomic_t context_list_counter; /* needed for unique contexts */
62815+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62816 struct list_head context_list; /* list of context id's
62817 and pointers */
62818 #endif
5e856224
MT
62819diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62820index 58404b0..439ed95 100644
62821--- a/include/linux/if_team.h
62822+++ b/include/linux/if_team.h
62823@@ -64,6 +64,7 @@ struct team_mode_ops {
62824 void (*port_leave)(struct team *team, struct team_port *port);
62825 void (*port_change_mac)(struct team *team, struct team_port *port);
62826 };
62827+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62828
62829 enum team_option_type {
62830 TEAM_OPTION_TYPE_U32,
62831@@ -112,7 +113,7 @@ struct team {
62832 struct list_head option_list;
62833
62834 const struct team_mode *mode;
62835- struct team_mode_ops ops;
62836+ team_mode_ops_no_const ops;
62837 long mode_priv[TEAM_MODE_PRIV_LONGS];
62838 };
62839
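
The typedef added above follows a pattern used throughout this patch: under the constify GCC plugin shipped with grsecurity/PaX, structures made up of function pointers are turned read-only, and __no_const marks the embedded instances that must stay writable at runtime (here the per-team copy of team_mode_ops). A reduced sketch of the pattern; the fallback #define only exists so the snippet compiles without the plugin, and struct ops / device_state are invented names:

    /* outside a constify-plugin build the attribute does not exist */
    #ifndef __no_const
    #define __no_const
    #endif

    struct ops {                  /* all-function-pointer struct: candidate for constification */
        int (*start)(void);
        void (*stop)(void);
    };
    typedef struct ops __no_const ops_no_const;   /* opt-out alias, as in the hunk above */

    struct device_state {
        const struct ops *mode;   /* shared read-only table: fine to constify */
        ops_no_const ops;         /* per-instance copy rewritten at runtime */
    };

    int main(void)
    {
        return 0;
    }
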
fe2de317 62840diff --git a/include/linux/init.h b/include/linux/init.h
5e856224 62841index 6b95109..4aca62c 100644
fe2de317
MT
62842--- a/include/linux/init.h
62843+++ b/include/linux/init.h
5e856224 62844@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
6892158b
MT
62845
62846 /* Each module must use one module_init(). */
62847 #define module_init(initfn) \
62848- static inline initcall_t __inittest(void) \
62849+ static inline __used initcall_t __inittest(void) \
62850 { return initfn; } \
62851 int init_module(void) __attribute__((alias(#initfn)));
62852
62853 /* This is only required if you want to be unloadable. */
62854 #define module_exit(exitfn) \
62855- static inline exitcall_t __exittest(void) \
62856+ static inline __used exitcall_t __exittest(void) \
62857 { return exitfn; } \
62858 void cleanup_module(void) __attribute__((alias(#exitfn)));
62859
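
The only change in this hunk is the added __used on the hidden __inittest/__exittest helpers, which exist so the compiler type-checks the function handed to module_init()/module_exit(); marking them __used keeps the otherwise-unreferenced static inlines from being discarded or warned about under the stricter build options this patch set enables (that motivation is inferred, not stated in the hunk). For context, a minimal module that exercises the patched macros; the module itself is a generic skeleton, not part of the patch:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
        pr_info("demo: loaded\n");
        return 0;
    }

    static void __exit demo_exit(void)
    {
        pr_info("demo: unloaded\n");
    }

    /* module_init() expands to the __inittest type-check helper patched above;
     * a demo_init with the wrong prototype is exactly what it would catch */
    module_init(demo_init);
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");
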
fe2de317 62860diff --git a/include/linux/init_task.h b/include/linux/init_task.h
5e856224 62861index 9c66b1a..a3fdded 100644
fe2de317
MT
62862--- a/include/linux/init_task.h
62863+++ b/include/linux/init_task.h
5e856224 62864@@ -127,6 +127,12 @@ extern struct cred init_cred;
4c928ab7
MT
62865
62866 #define INIT_TASK_COMM "swapper"
71d190be
MT
62867
62868+#ifdef CONFIG_X86
62869+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62870+#else
62871+#define INIT_TASK_THREAD_INFO
62872+#endif
62873+
62874 /*
15a11c5b
MT
62875 * INIT_TASK is used to set up the first task table, touch at
62876 * your own risk!. Base=0, limit=0x1fffff (=2MB)
5e856224 62877@@ -165,6 +171,7 @@ extern struct cred init_cred;
71d190be 62878 RCU_INIT_POINTER(.cred, &init_cred), \
4c928ab7 62879 .comm = INIT_TASK_COMM, \
71d190be
MT
62880 .thread = INIT_THREAD, \
62881+ INIT_TASK_THREAD_INFO \
62882 .fs = &init_fs, \
62883 .files = &init_files, \
62884 .signal = &init_signals, \
fe2de317 62885diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
4c928ab7 62886index e6ca56d..8583707 100644
fe2de317
MT
62887--- a/include/linux/intel-iommu.h
62888+++ b/include/linux/intel-iommu.h
15a11c5b
MT
62889@@ -296,7 +296,7 @@ struct iommu_flush {
62890 u8 fm, u64 type);
62891 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62892 unsigned int size_order, u64 type);
62893-};
62894+} __no_const;
62895
62896 enum {
62897 SR_DMAR_FECTL_REG,
fe2de317 62898diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
4c928ab7 62899index a64b00e..464d8bc 100644
fe2de317
MT
62900--- a/include/linux/interrupt.h
62901+++ b/include/linux/interrupt.h
4c928ab7 62902@@ -441,7 +441,7 @@ enum
ae4e228f
MT
62903 /* map softirq index to softirq name. update 'softirq_to_name' in
62904 * kernel/softirq.c when adding a new softirq.
62905 */
62906-extern char *softirq_to_name[NR_SOFTIRQS];
62907+extern const char * const softirq_to_name[NR_SOFTIRQS];
62908
62909 /* softirq mask and active fields moved to irq_cpustat_t in
62910 * asm/hardirq.h to get better cache usage. KAO
4c928ab7 62911@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
ae4e228f
MT
62912
62913 struct softirq_action
62914 {
62915- void (*action)(struct softirq_action *);
62916+ void (*action)(void);
62917 };
62918
62919 asmlinkage void do_softirq(void);
62920 asmlinkage void __do_softirq(void);
62921-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62922+extern void open_softirq(int nr, void (*action)(void));
62923 extern void softirq_init(void);
bc901d79
MT
62924 static inline void __raise_softirq_irqoff(unsigned int nr)
62925 {
fe2de317 62926diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
4c928ab7 62927index 3875719..4cd454c 100644
fe2de317
MT
62928--- a/include/linux/kallsyms.h
62929+++ b/include/linux/kallsyms.h
58c5fc13
MT
62930@@ -15,7 +15,8 @@
62931
62932 struct module;
62933
62934-#ifdef CONFIG_KALLSYMS
bc901d79 62935+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58c5fc13
MT
62936+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62937 /* Lookup the address for a symbol. Returns 0 if not found. */
62938 unsigned long kallsyms_lookup_name(const char *name);
62939
fe2de317 62940@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
58c5fc13
MT
62941 /* Stupid that this does nothing, but I didn't create this mess. */
62942 #define __print_symbol(fmt, addr)
62943 #endif /*CONFIG_KALLSYMS*/
bc901d79 62944+#else /* when included by kallsyms.c, vsnprintf.c, or
4c928ab7 62945+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58c5fc13 62946+extern void __print_symbol(const char *fmt, unsigned long address);
66a7e928 62947+extern int sprint_backtrace(char *buffer, unsigned long address);
bc901d79
MT
62948+extern int sprint_symbol(char *buffer, unsigned long address);
62949+const char *kallsyms_lookup(unsigned long addr,
62950+ unsigned long *symbolsize,
62951+ unsigned long *offset,
62952+ char **modname, char *namebuf);
58c5fc13
MT
62953+#endif
62954
62955 /* This macro allows us to keep printk typechecking */
4c928ab7 62956 static __printf(1, 2)
fe2de317 62957diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
5e856224 62958index c4d2fc1..5df9c19 100644
fe2de317
MT
62959--- a/include/linux/kgdb.h
62960+++ b/include/linux/kgdb.h
8308f9c9
MT
62961@@ -53,7 +53,7 @@ extern int kgdb_connected;
62962 extern int kgdb_io_module_registered;
62963
62964 extern atomic_t kgdb_setting_breakpoint;
62965-extern atomic_t kgdb_cpu_doing_single_step;
62966+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62967
62968 extern struct task_struct *kgdb_usethread;
62969 extern struct task_struct *kgdb_contthread;
5e856224 62970@@ -252,7 +252,7 @@ struct kgdb_arch {
15a11c5b
MT
62971 void (*disable_hw_break)(struct pt_regs *regs);
62972 void (*remove_all_hw_break)(void);
62973 void (*correct_hw_break)(void);
62974-};
62975+} __do_const;
ae4e228f 62976
15a11c5b
MT
62977 /**
62978 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
5e856224 62979@@ -277,7 +277,7 @@ struct kgdb_io {
15a11c5b
MT
62980 void (*pre_exception) (void);
62981 void (*post_exception) (void);
62982 int is_console;
62983-};
62984+} __do_const;
ae4e228f 62985
15a11c5b 62986 extern struct kgdb_arch arch_kgdb_ops;
ae4e228f 62987
fe2de317 62988diff --git a/include/linux/kmod.h b/include/linux/kmod.h
5e856224 62989index 0fb48ef..1b680b2 100644
fe2de317
MT
62990--- a/include/linux/kmod.h
62991+++ b/include/linux/kmod.h
62992@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
71d190be 62993 * usually useless though. */
4c928ab7
MT
62994 extern __printf(2, 3)
62995 int __request_module(bool wait, const char *name, ...);
62996+extern __printf(3, 4)
62997+int ___request_module(bool wait, char *param_name, const char *name, ...);
71d190be
MT
62998 #define request_module(mod...) __request_module(true, mod)
62999 #define request_module_nowait(mod...) __request_module(false, mod)
63000 #define try_then_request_module(x, mod...) \
5e856224
MT
63001diff --git a/include/linux/kref.h b/include/linux/kref.h
63002index 9c07dce..a92fa71 100644
63003--- a/include/linux/kref.h
63004+++ b/include/linux/kref.h
63005@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
63006 static inline int kref_sub(struct kref *kref, unsigned int count,
63007 void (*release)(struct kref *kref))
63008 {
63009- WARN_ON(release == NULL);
63010+ BUG_ON(release == NULL);
63011
63012 if (atomic_sub_and_test((int) count, &kref->refcount)) {
63013 release(kref);
fe2de317 63014diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
5e856224 63015index 4c4e83d..5f16617 100644
fe2de317
MT
63016--- a/include/linux/kvm_host.h
63017+++ b/include/linux/kvm_host.h
5e856224 63018@@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
58c5fc13
MT
63019 void vcpu_load(struct kvm_vcpu *vcpu);
63020 void vcpu_put(struct kvm_vcpu *vcpu);
63021
57199397
MT
63022-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
63023+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58c5fc13
MT
63024 struct module *module);
63025 void kvm_exit(void);
63026
5e856224 63027@@ -416,20 +416,20 @@ void kvm_get_pfn(pfn_t pfn);
4c928ab7
MT
63028 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
63029 int len);
63030 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
63031- unsigned long len);
63032-int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
63033+ unsigned long len) __size_overflow(4);
63034+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4);
63035 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63036- void *data, unsigned long len);
63037+ void *data, unsigned long len) __size_overflow(4);
63038 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
63039 int offset, int len);
63040 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
63041- unsigned long len);
63042+ unsigned long len) __size_overflow(2,4);
63043 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63044- void *data, unsigned long len);
63045+ void *data, unsigned long len) __size_overflow(4);
63046 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63047 gpa_t gpa);
63048 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
63049-int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
63050+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3);
63051 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
63052 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
63053 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
5e856224 63054@@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
58c5fc13
MT
63055 struct kvm_guest_debug *dbg);
63056 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
63057
63058-int kvm_arch_init(void *opaque);
63059+int kvm_arch_init(const void *opaque);
63060 void kvm_arch_exit(void);
63061
63062 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
5e856224 63063@@ -727,7 +727,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
4c928ab7
MT
63064 int kvm_set_irq_routing(struct kvm *kvm,
63065 const struct kvm_irq_routing_entry *entries,
63066 unsigned nr,
63067- unsigned flags);
63068+ unsigned flags) __size_overflow(3);
63069 void kvm_free_irq_routing(struct kvm *kvm);
63070
63071 #else
fe2de317 63072diff --git a/include/linux/libata.h b/include/linux/libata.h
4c928ab7 63073index cafc09a..d7e7829 100644
fe2de317
MT
63074--- a/include/linux/libata.h
63075+++ b/include/linux/libata.h
6e9df6a3 63076@@ -909,7 +909,7 @@ struct ata_port_operations {
15a11c5b
MT
63077 * fields must be pointers.
63078 */
63079 const struct ata_port_operations *inherits;
63080-};
63081+} __do_const;
66a7e928 63082
15a11c5b 63083 struct ata_port_info {
ae4e228f 63084 unsigned long flags;
fe2de317
MT
63085diff --git a/include/linux/mca.h b/include/linux/mca.h
63086index 3797270..7765ede 100644
63087--- a/include/linux/mca.h
63088+++ b/include/linux/mca.h
15a11c5b
MT
63089@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
63090 int region);
63091 void * (*mca_transform_memory)(struct mca_device *,
63092 void *memory);
63093-};
63094+} __no_const;
63095
63096 struct mca_bus {
63097 u64 default_dma_mask;
fe2de317 63098diff --git a/include/linux/memory.h b/include/linux/memory.h
5e856224 63099index 1ac7f6e..a5794d0 100644
fe2de317
MT
63100--- a/include/linux/memory.h
63101+++ b/include/linux/memory.h
5e856224 63102@@ -143,7 +143,7 @@ struct memory_accessor {
15a11c5b
MT
63103 size_t count);
63104 ssize_t (*write)(struct memory_accessor *, const char *buf,
63105 off_t offset, size_t count);
63106-};
63107+} __no_const;
ae4e228f
MT
63108
63109 /*
15a11c5b 63110 * Kernel text modification mutex, used for code patching. Users of this lock
fe2de317 63111diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
4c928ab7 63112index 9970337..9444122 100644
fe2de317
MT
63113--- a/include/linux/mfd/abx500.h
63114+++ b/include/linux/mfd/abx500.h
4c928ab7 63115@@ -188,6 +188,7 @@ struct abx500_ops {
15a11c5b 63116 int (*event_registers_startup_state_get) (struct device *, u8 *);
66a7e928
MT
63117 int (*startup_irq_enabled) (struct device *, unsigned int);
63118 };
15a11c5b 63119+typedef struct abx500_ops __no_const abx500_ops_no_const;
66a7e928 63120
15a11c5b 63121 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
66a7e928 63122 void abx500_remove_ops(struct device *dev);
fe2de317 63123diff --git a/include/linux/mm.h b/include/linux/mm.h
5e856224 63124index 17b27cd..baea141 100644
fe2de317
MT
63125--- a/include/linux/mm.h
63126+++ b/include/linux/mm.h
4c928ab7 63127@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
58c5fc13 63128
df50ba0c
MT
63129 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
63130 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
63131+
63132+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63133+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
63134+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
63135+#else
63136 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
58c5fc13
MT
63137+#endif
63138+
df50ba0c
MT
63139 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
63140 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
63141
4c928ab7 63142@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
bc901d79
MT
63143 int set_page_dirty_lock(struct page *page);
63144 int clear_page_dirty_for_io(struct page *page);
63145
63146-/* Is the vma a continuation of the stack vma above it? */
66a7e928 63147-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
bc901d79
MT
63148-{
63149- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
63150-}
66a7e928
MT
63151-
63152-static inline int stack_guard_page_start(struct vm_area_struct *vma,
63153- unsigned long addr)
63154-{
63155- return (vma->vm_flags & VM_GROWSDOWN) &&
63156- (vma->vm_start == addr) &&
63157- !vma_growsdown(vma->vm_prev, addr);
63158-}
63159-
63160-/* Is the vma a continuation of the stack vma below it? */
63161-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
63162-{
63163- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
63164-}
63165-
63166-static inline int stack_guard_page_end(struct vm_area_struct *vma,
63167- unsigned long addr)
63168-{
63169- return (vma->vm_flags & VM_GROWSUP) &&
63170- (vma->vm_end == addr) &&
63171- !vma_growsup(vma->vm_next, addr);
63172-}
bc901d79
MT
63173-
63174 extern unsigned long move_page_tables(struct vm_area_struct *vma,
63175 unsigned long old_addr, struct vm_area_struct *new_vma,
63176 unsigned long new_addr, unsigned long len);
4c928ab7 63177@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
6e9df6a3
MT
63178 }
63179 #endif
58c5fc13 63180
6892158b 63181+#ifdef CONFIG_MMU
15a11c5b 63182+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
6892158b 63183+#else
15a11c5b 63184+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
6892158b
MT
63185+{
63186+ return __pgprot(0);
63187+}
63188+#endif
58c5fc13
MT
63189+
63190 int vma_wants_writenotify(struct vm_area_struct *vma);
63191
bc901d79 63192 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
5e856224
MT
63193@@ -1152,8 +1140,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
63194 {
63195 return 0;
63196 }
63197+
63198+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
63199+ unsigned long address)
63200+{
63201+ return 0;
63202+}
63203 #else
63204 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63205+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63206 #endif
63207
63208 #ifdef __PAGETABLE_PMD_FOLDED
63209@@ -1162,8 +1157,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
63210 {
63211 return 0;
63212 }
63213+
63214+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
63215+ unsigned long address)
63216+{
63217+ return 0;
63218+}
63219 #else
63220 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
63221+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
63222 #endif
63223
63224 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
63225@@ -1181,11 +1183,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
63226 NULL: pud_offset(pgd, address);
63227 }
63228
63229+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
63230+{
63231+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
63232+ NULL: pud_offset(pgd, address);
63233+}
63234+
63235 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
63236 {
63237 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
63238 NULL: pmd_offset(pud, address);
63239 }
63240+
63241+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
63242+{
63243+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
63244+ NULL: pmd_offset(pud, address);
63245+}
63246 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
63247
63248 #if USE_SPLIT_PTLOCKS
63249@@ -1409,6 +1423,7 @@ out:
58c5fc13
MT
63250 }
63251
63252 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63253+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63254
63255 extern unsigned long do_brk(unsigned long, unsigned long);
63256
5e856224 63257@@ -1466,6 +1481,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
58c5fc13
MT
63258 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63259 struct vm_area_struct **pprev);
63260
63261+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
df50ba0c 63262+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
58c5fc13
MT
63263+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63264+
63265 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63266 NULL if none. Assume start_addr < end_addr. */
63267 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
5e856224
MT
63268@@ -1494,15 +1513,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
63269 return vma;
58c5fc13
MT
63270 }
63271
6892158b 63272-#ifdef CONFIG_MMU
58c5fc13 63273-pgprot_t vm_get_page_prot(unsigned long vm_flags);
6892158b
MT
63274-#else
63275-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
63276-{
63277- return __pgprot(0);
63278-}
63279-#endif
63280-
58c5fc13
MT
63281 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63282 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63283 unsigned long pfn, unsigned long size, pgprot_t);
5e856224 63284@@ -1606,7 +1616,7 @@ extern int unpoison_memory(unsigned long pfn);
ae4e228f
MT
63285 extern int sysctl_memory_failure_early_kill;
63286 extern int sysctl_memory_failure_recovery;
63287 extern void shake_page(struct page *p, int access);
63288-extern atomic_long_t mce_bad_pages;
63289+extern atomic_long_unchecked_t mce_bad_pages;
63290 extern int soft_offline_page(struct page *page, int flags);
66a7e928
MT
63291
63292 extern void dump_page(struct page *page);
5e856224
MT
63293@@ -1637,5 +1647,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
63294 static inline bool page_is_guard(struct page *page) { return false; }
63295 #endif /* CONFIG_DEBUG_PAGEALLOC */
df50ba0c 63296
58c5fc13
MT
63297+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63298+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63299+#else
63300+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63301+#endif
63302+
63303 #endif /* __KERNEL__ */
63304 #endif /* _LINUX_MM_H */
fe2de317 63305diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
5e856224 63306index 3cc3062..efeaeb7 100644
fe2de317
MT
63307--- a/include/linux/mm_types.h
63308+++ b/include/linux/mm_types.h
5e856224 63309@@ -252,6 +252,8 @@ struct vm_area_struct {
63310 #ifdef CONFIG_NUMA
63311 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63312 #endif
63313+
63314+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63315 };
63316
63317 struct core_thread {
63318@@ -326,7 +328,7 @@ struct mm_struct {
63319 unsigned long def_flags;
63320 unsigned long nr_ptes; /* Page table pages */
63321 unsigned long start_code, end_code, start_data, end_data;
63322- unsigned long start_brk, brk, start_stack;
63323+ unsigned long brk_gap, start_brk, brk, start_stack;
63324 unsigned long arg_start, arg_end, env_start, env_end;
63325
63326 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
63327@@ -388,6 +390,24 @@ struct mm_struct {
63328 #ifdef CONFIG_CPUMASK_OFFSTACK
63329 struct cpumask cpumask_allocation;
63330 #endif
63331+
5e856224 63332+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63333+ unsigned long pax_flags;
63334+#endif
63335+
63336+#ifdef CONFIG_PAX_DLRESOLVE
63337+ unsigned long call_dl_resolve;
63338+#endif
63339+
63340+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63341+ unsigned long call_syscall;
63342+#endif
63343+
63344+#ifdef CONFIG_PAX_ASLR
63345+ unsigned long delta_mmap; /* randomized offset */
63346+ unsigned long delta_stack; /* randomized offset */
63347+#endif
63348+
63349 };
63350
15a11c5b 63351 static inline void mm_init_cpumask(struct mm_struct *mm)
63352diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
63353index 1d1b1e1..2a13c78 100644
63354--- a/include/linux/mmu_notifier.h
63355+++ b/include/linux/mmu_notifier.h
63356@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
63357 */
63358 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63359 ({ \
63360- pte_t __pte; \
63361+ pte_t ___pte; \
63362 struct vm_area_struct *___vma = __vma; \
63363 unsigned long ___address = __address; \
63364- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63365+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63366 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63367- __pte; \
63368+ ___pte; \
63369 })
63370
16454cff 63371 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
fe2de317 63372diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
5e856224 63373index 650ba2f..af0a58c 100644
63374--- a/include/linux/mmzone.h
63375+++ b/include/linux/mmzone.h
5e856224 63376@@ -379,7 +379,7 @@ struct zone {
63377 unsigned long flags; /* zone flags, see below */
63378
63379 /* Zone statistics */
63380- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63381+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63382
63383 /*
6892158b 63384 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
fe2de317 63385diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
5e856224 63386index 83ac071..2656e0e 100644
63387--- a/include/linux/mod_devicetable.h
63388+++ b/include/linux/mod_devicetable.h
63389@@ -12,7 +12,7 @@
63390 typedef unsigned long kernel_ulong_t;
63391 #endif
63392
63393-#define PCI_ANY_ID (~0)
63394+#define PCI_ANY_ID ((__u16)~0)
63395
63396 struct pci_device_id {
63397 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63398@@ -131,7 +131,7 @@ struct usb_device_id {
63399 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63400 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63401
63402-#define HID_ANY_ID (~0)
63403+#define HID_ANY_ID (~0U)
63404
63405 struct hid_device_id {
63406 __u16 bus;
fe2de317 63407diff --git a/include/linux/module.h b/include/linux/module.h
5e856224 63408index 4598bf0..e069d7f 100644
63409--- a/include/linux/module.h
63410+++ b/include/linux/module.h
4c928ab7 63411@@ -17,6 +17,7 @@
63412 #include <linux/moduleparam.h>
63413 #include <linux/tracepoint.h>
4c928ab7 63414 #include <linux/export.h>
63415+#include <linux/fs.h>
63416
63417 #include <linux/percpu.h>
63418 #include <asm/module.h>
5e856224 63419@@ -275,19 +276,16 @@ struct module
63420 int (*init)(void);
63421
63422 /* If this is non-NULL, vfree after init() returns */
63423- void *module_init;
63424+ void *module_init_rx, *module_init_rw;
63425
63426 /* Here is the actual code + data, vfree'd on unload. */
63427- void *module_core;
63428+ void *module_core_rx, *module_core_rw;
63429
63430 /* Here are the sizes of the init and core sections */
63431- unsigned int init_size, core_size;
63432+ unsigned int init_size_rw, core_size_rw;
63433
63434 /* The size of the executable code in each section. */
63435- unsigned int init_text_size, core_text_size;
63436-
63437- /* Size of RO sections of the module (text+rodata) */
63438- unsigned int init_ro_size, core_ro_size;
63439+ unsigned int init_size_rx, core_size_rx;
63440
63441 /* Arch-specific module values */
63442 struct mod_arch_specific arch;
5e856224 63443@@ -343,6 +341,10 @@ struct module
63444 #ifdef CONFIG_EVENT_TRACING
63445 struct ftrace_event_call **trace_events;
63446 unsigned int num_trace_events;
63447+ struct file_operations trace_id;
63448+ struct file_operations trace_enable;
63449+ struct file_operations trace_format;
63450+ struct file_operations trace_filter;
63451 #endif
63452 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63453 unsigned int num_ftrace_callsites;
5e856224 63454@@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
df50ba0c 63455 bool is_module_percpu_address(unsigned long addr);
63456 bool is_module_text_address(unsigned long addr);
63457
63458+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63459+{
63460+
63461+#ifdef CONFIG_PAX_KERNEXEC
63462+ if (ktla_ktva(addr) >= (unsigned long)start &&
63463+ ktla_ktva(addr) < (unsigned long)start + size)
63464+ return 1;
63465+#endif
63466+
63467+ return ((void *)addr >= start && (void *)addr < start + size);
63468+}
63469+
63470+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63471+{
63472+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63473+}
63474+
63475+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63476+{
63477+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63478+}
63479+
63480+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63481+{
63482+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63483+}
63484+
63485+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63486+{
63487+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63488+}
63489+
63490 static inline int within_module_core(unsigned long addr, struct module *mod)
63491 {
63492- return (unsigned long)mod->module_core <= addr &&
63493- addr < (unsigned long)mod->module_core + mod->core_size;
63494+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63495 }
63496
63497 static inline int within_module_init(unsigned long addr, struct module *mod)
63498 {
63499- return (unsigned long)mod->module_init <= addr &&
63500- addr < (unsigned long)mod->module_init + mod->init_size;
63501+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63502 }
63503
63504 /* Search for module by name: must hold module_mutex. */
fe2de317 63505diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
5e856224 63506index b2be02e..72d2f78 100644
63507--- a/include/linux/moduleloader.h
63508+++ b/include/linux/moduleloader.h
4c928ab7 63509@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
58c5fc13 63510
63511 /* Allocator used for allocating struct module, core sections and init
63512 sections. Returns NULL on failure. */
63513-void *module_alloc(unsigned long size);
63514+void *module_alloc(unsigned long size) __size_overflow(1);
63515+
58c5fc13 63516+#ifdef CONFIG_PAX_KERNEXEC
5e856224 63517+void *module_alloc_exec(unsigned long size) __size_overflow(1);
63518+#else
63519+#define module_alloc_exec(x) module_alloc(x)
63520+#endif
4c928ab7 63521
63522 /* Free memory returned from module_alloc. */
63523 void module_free(struct module *mod, void *module_region);
63524
63525+#ifdef CONFIG_PAX_KERNEXEC
63526+void module_free_exec(struct module *mod, void *module_region);
63527+#else
ae4e228f 63528+#define module_free_exec(x, y) module_free((x), (y))
63529+#endif
63530+
63531 /* Apply the given relocation to the (simplified) ELF. Return -error
63532 or 0. */
63533 int apply_relocate(Elf_Shdr *sechdrs,
fe2de317 63534diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
5e856224 63535index c47f4d6..23f9bdb 100644
63536--- a/include/linux/moduleparam.h
63537+++ b/include/linux/moduleparam.h
4c928ab7 63538@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
63539 * @len is usually just sizeof(string).
63540 */
63541 #define module_param_string(name, string, len, perm) \
63542- static const struct kparam_string __param_string_##name \
63543+ static const struct kparam_string __param_string_##name __used \
63544 = { len, string }; \
63545 __module_param_call(MODULE_PARAM_PREFIX, name, \
63546 &param_ops_string, \
5e856224 63547@@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63548 */
63549 #define module_param_array_named(name, array, type, nump, perm) \
5e856224 63550 param_check_##type(name, &(array)[0]); \
63551- static const struct kparam_array __param_arr_##name \
63552+ static const struct kparam_array __param_arr_##name __used \
63553 = { .max = ARRAY_SIZE(array), .num = nump, \
63554 .ops = &param_ops_##type, \
63555 .elemsize = sizeof(array[0]), .elem = array }; \
63556diff --git a/include/linux/namei.h b/include/linux/namei.h
63557index ffc0213..2c1f2cb 100644
63558--- a/include/linux/namei.h
63559+++ b/include/linux/namei.h
66a7e928 63560@@ -24,7 +24,7 @@ struct nameidata {
16454cff 63561 unsigned seq;
63562 int last_type;
63563 unsigned depth;
63564- char *saved_names[MAX_NESTED_LINKS + 1];
63565+ const char *saved_names[MAX_NESTED_LINKS + 1];
63566
63567 /* Intent data */
63568 union {
6e9df6a3 63569@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63570 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63571 extern void unlock_rename(struct dentry *, struct dentry *);
63572
63573-static inline void nd_set_link(struct nameidata *nd, char *path)
63574+static inline void nd_set_link(struct nameidata *nd, const char *path)
63575 {
63576 nd->saved_names[nd->depth] = path;
63577 }
63578
63579-static inline char *nd_get_link(struct nameidata *nd)
ae4e228f 63580+static inline const char *nd_get_link(const struct nameidata *nd)
63581 {
63582 return nd->saved_names[nd->depth];
63583 }
fe2de317 63584diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
5e856224 63585index 7e472b7..212d381 100644
63586--- a/include/linux/netdevice.h
63587+++ b/include/linux/netdevice.h
63588@@ -1002,6 +1002,7 @@ struct net_device_ops {
63589 int (*ndo_neigh_construct)(struct neighbour *n);
63590 void (*ndo_neigh_destroy)(struct neighbour *n);
63591 };
63592+typedef struct net_device_ops __no_const net_device_ops_no_const;
63593
63594 /*
63595 * The DEVICE structure.
5e856224 63596@@ -1063,7 +1064,7 @@ struct net_device {
63597 int iflink;
63598
63599 struct net_device_stats stats;
63600- atomic_long_t rx_dropped; /* dropped packets by core network
63601+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63602 * Do not use this in drivers.
63603 */
63604
63605diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63606new file mode 100644
63607index 0000000..33f4af8
63608--- /dev/null
63609+++ b/include/linux/netfilter/xt_gradm.h
63610@@ -0,0 +1,9 @@
63611+#ifndef _LINUX_NETFILTER_XT_GRADM_H
63612+#define _LINUX_NETFILTER_XT_GRADM_H 1
63613+
63614+struct xt_gradm_mtinfo {
63615+ __u16 flags;
63616+ __u16 invflags;
63617+};
63618+
63619+#endif
63620diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63621index c65a18a..0c05f3a 100644
63622--- a/include/linux/of_pdt.h
63623+++ b/include/linux/of_pdt.h
63624@@ -32,7 +32,7 @@ struct of_pdt_ops {
63625
63626 /* return 0 on success; fill in 'len' with number of bytes in path */
63627 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63628-};
63629+} __no_const;
63630
63631 extern void *prom_early_alloc(unsigned long size);
63632
fe2de317 63633diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
4c928ab7 63634index a4c5624..2dabfb7 100644
63635--- a/include/linux/oprofile.h
63636+++ b/include/linux/oprofile.h
63637@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63638 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63639 char const * name, ulong * val);
58c5fc13 63640
63641-/** Create a file for read-only access to an atomic_t. */
63642+/** Create a file for read-only access to an atomic_unchecked_t. */
63643 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63644- char const * name, atomic_t * val);
63645+ char const * name, atomic_unchecked_t * val);
63646
63647 /** create a directory */
63648 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63649@@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
63650 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
63651 * Returns 0 on success, < 0 on error.
63652 */
63653-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
63654+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
63655
63656 /** lock for read/write safety */
63657 extern raw_spinlock_t oprofilefs_lock;
63658diff --git a/include/linux/padata.h b/include/linux/padata.h
63659index 4633b2f..988bc08 100644
63660--- a/include/linux/padata.h
63661+++ b/include/linux/padata.h
63662@@ -129,7 +129,7 @@ struct parallel_data {
63663 struct padata_instance *pinst;
63664 struct padata_parallel_queue __percpu *pqueue;
63665 struct padata_serial_queue __percpu *squeue;
63666- atomic_t seq_nr;
63667+ atomic_unchecked_t seq_nr;
63668 atomic_t reorder_objects;
63669 atomic_t refcnt;
63670 unsigned int max_seq_nr;
fe2de317 63671diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
5e856224 63672index abb2776..d8b8e15 100644
63673--- a/include/linux/perf_event.h
63674+++ b/include/linux/perf_event.h
5e856224 63675@@ -750,8 +750,8 @@ struct perf_event {
63676
63677 enum perf_event_active_state state;
63678 unsigned int attach_state;
63679- local64_t count;
63680- atomic64_t child_count;
63681+ local64_t count; /* PaX: fix it one day */
63682+ atomic64_unchecked_t child_count;
63683
63684 /*
63685 * These are the total time in nanoseconds that the event
5e856224 63686@@ -802,8 +802,8 @@ struct perf_event {
63687 * These accumulate total time (in nanoseconds) that children
63688 * events have been enabled and running, respectively.
63689 */
63690- atomic64_t child_total_time_enabled;
63691- atomic64_t child_total_time_running;
63692+ atomic64_unchecked_t child_total_time_enabled;
63693+ atomic64_unchecked_t child_total_time_running;
63694
63695 /*
63696 * Protect attach/detach and child_list:
63697diff --git a/include/linux/personality.h b/include/linux/personality.h
63698index 8fc7dd1a..c19d89e 100644
63699--- a/include/linux/personality.h
63700+++ b/include/linux/personality.h
63701@@ -44,6 +44,7 @@ enum {
63702 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63703 ADDR_NO_RANDOMIZE | \
63704 ADDR_COMPAT_LAYOUT | \
63705+ ADDR_LIMIT_3GB | \
63706 MMAP_PAGE_ZERO)
63707
63708 /*
fe2de317 63709diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
5e856224 63710index 0072a53..c5dcca5 100644
63711--- a/include/linux/pipe_fs_i.h
63712+++ b/include/linux/pipe_fs_i.h
5e856224 63713@@ -47,9 +47,9 @@ struct pipe_buffer {
57199397 63714 struct pipe_inode_info {
ae4e228f 63715 wait_queue_head_t wait;
57199397 63716 unsigned int nrbufs, curbuf, buffers;
63717- unsigned int readers;
63718- unsigned int writers;
63719- unsigned int waiting_writers;
63720+ atomic_t readers;
63721+ atomic_t writers;
63722+ atomic_t waiting_writers;
63723 unsigned int r_counter;
63724 unsigned int w_counter;
57199397 63725 struct page *tmp_page;
fe2de317 63726diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
5e856224 63727index 609daae..5392427 100644
63728--- a/include/linux/pm_runtime.h
63729+++ b/include/linux/pm_runtime.h
5e856224 63730@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63731
63732 static inline void pm_runtime_mark_last_busy(struct device *dev)
63733 {
63734- ACCESS_ONCE(dev->power.last_busy) = jiffies;
63735+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63736 }
63737
63738 #else /* !CONFIG_PM_RUNTIME */
fe2de317 63739diff --git a/include/linux/poison.h b/include/linux/poison.h
5e856224 63740index 2110a81..13a11bb 100644
63741--- a/include/linux/poison.h
63742+++ b/include/linux/poison.h
ae4e228f 63743@@ -19,8 +19,8 @@
63744 * under normal circumstances, used to verify that nobody uses
63745 * non-initialized list entries.
63746 */
63747-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63748-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63749+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63750+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63751
63752 /********** include/linux/timer.h **********/
63753 /*
63754diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63755index 58969b2..ead129b 100644
63756--- a/include/linux/preempt.h
63757+++ b/include/linux/preempt.h
6e9df6a3 63758@@ -123,7 +123,7 @@ struct preempt_ops {
63759 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63760 void (*sched_out)(struct preempt_notifier *notifier,
63761 struct task_struct *next);
63762-};
63763+} __no_const;
66a7e928 63764
63765 /**
63766 * preempt_notifier - key for installing preemption notifiers
fe2de317 63767diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
5e856224 63768index 85c5073..51fac8b 100644
63769--- a/include/linux/proc_fs.h
63770+++ b/include/linux/proc_fs.h
5e856224 63771@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63772 return proc_create_data(name, mode, parent, proc_fops, NULL);
63773 }
63774
5e856224 63775+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63776+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63777+{
63778+#ifdef CONFIG_GRKERNSEC_PROC_USER
63779+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63780+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63781+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63782+#else
63783+ return proc_create_data(name, mode, parent, proc_fops, NULL);
63784+#endif
63785+}
63786+
63787 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
5e856224 63788 umode_t mode, struct proc_dir_entry *base,
58c5fc13 63789 read_proc_t *read_proc, void * data)
5e856224 63790@@ -258,7 +270,7 @@ union proc_op {
63791 int (*proc_show)(struct seq_file *m,
63792 struct pid_namespace *ns, struct pid *pid,
63793 struct task_struct *task);
63794-};
63795+} __no_const;
63796
63797 struct ctl_table_header;
63798 struct ctl_table;
fe2de317 63799diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
5e856224 63800index c2f1f6a..6fdb196 100644
63801--- a/include/linux/ptrace.h
63802+++ b/include/linux/ptrace.h
5e856224
MT
63803@@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
63804 if (unlikely(ptrace_event_enabled(current, event))) {
63805 current->ptrace_message = message;
63806 ptrace_notify((event << 8) | SIGTRAP);
63807- } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
63808+ } else if (event == PTRACE_EVENT_EXEC) {
63809 /* legacy EXEC report via SIGTRAP */
63810- send_sig(SIGTRAP, current, 0);
63811+ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
63812+ send_sig(SIGTRAP, current, 0);
63813 }
63814 }
63815
fe2de317 63816diff --git a/include/linux/random.h b/include/linux/random.h
4c928ab7 63817index 8f74538..02a1012 100644
63818--- a/include/linux/random.h
63819+++ b/include/linux/random.h
15a11c5b 63820@@ -69,12 +69,17 @@ void srandom32(u32 seed);
63821
63822 u32 prandom32(struct rnd_state *);
63823
63824+static inline unsigned long pax_get_random_long(void)
63825+{
63826+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63827+}
63828+
63829 /*
63830 * Handle minimum values for seeds
63831 */
63832 static inline u32 __seed(u32 x, u32 m)
63833 {
63834- return (x < m) ? x + m : x;
63835+ return (x <= m) ? x + m + 1 : x;
63836 }
58c5fc13 63837
57199397 63838 /**
63839diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63840index e0879a7..a12f962 100644
63841--- a/include/linux/reboot.h
63842+++ b/include/linux/reboot.h
63843@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
66a7e928
MT
63844 * Architecture-specific implementations of sys_reboot commands.
63845 */
63846
63847-extern void machine_restart(char *cmd);
63848-extern void machine_halt(void);
63849-extern void machine_power_off(void);
63850+extern void machine_restart(char *cmd) __noreturn;
63851+extern void machine_halt(void) __noreturn;
63852+extern void machine_power_off(void) __noreturn;
63853
63854 extern void machine_shutdown(void);
63855 struct pt_regs;
fe2de317 63856@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63857 */
63858
63859 extern void kernel_restart_prepare(char *cmd);
63860-extern void kernel_restart(char *cmd);
63861-extern void kernel_halt(void);
63862-extern void kernel_power_off(void);
63863+extern void kernel_restart(char *cmd) __noreturn;
63864+extern void kernel_halt(void) __noreturn;
63865+extern void kernel_power_off(void) __noreturn;
63866
63867 extern int C_A_D; /* for sysctl */
63868 void ctrl_alt_del(void);
6e9df6a3 63869@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63870 * Emergency restart, callable from an interrupt handler.
63871 */
63872
63873-extern void emergency_restart(void);
63874+extern void emergency_restart(void) __noreturn;
63875 #include <asm/emergency-restart.h>
63876
63877 #endif
fe2de317 63878diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
5e856224 63879index 2213ddc..650212a 100644
63880--- a/include/linux/reiserfs_fs.h
63881+++ b/include/linux/reiserfs_fs.h
63882@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63883 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63884
63885 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63886-#define get_generation(s) atomic_read (&fs_generation(s))
63887+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63888 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63889 #define __fs_changed(gen,s) (gen != get_generation (s))
ae4e228f 63890 #define fs_changed(gen,s) \
fe2de317 63891diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
5e856224 63892index 8c9e85c..1698e9a 100644
63893--- a/include/linux/reiserfs_fs_sb.h
63894+++ b/include/linux/reiserfs_fs_sb.h
ae4e228f 63895@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
63896 /* Comment? -Hans */
63897 wait_queue_head_t s_wait;
63898 /* To be obsoleted soon by per buffer seals.. -Hans */
63899- atomic_t s_generation_counter; // increased by one every time the
63900+ atomic_unchecked_t s_generation_counter; // increased by one every time the
63901 // tree gets re-balanced
63902 unsigned long s_properties; /* File system properties. Currently holds
63903 on-disk FS format */
fe2de317 63904diff --git a/include/linux/relay.h b/include/linux/relay.h
5e856224 63905index a822fd7..62b70f6 100644
63906--- a/include/linux/relay.h
63907+++ b/include/linux/relay.h
63908@@ -159,7 +159,7 @@ struct rchan_callbacks
63909 * The callback should return 0 if successful, negative if not.
63910 */
63911 int (*remove_buf_file)(struct dentry *dentry);
63912-};
63913+} __no_const;
63914
63915 /*
63916 * CONFIG_RELAY kernel API, kernel/relay.c
63917diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63918index c6c6084..5bf1212 100644
63919--- a/include/linux/rfkill.h
63920+++ b/include/linux/rfkill.h
63921@@ -147,6 +147,7 @@ struct rfkill_ops {
63922 void (*query)(struct rfkill *rfkill, void *data);
63923 int (*set_block)(void *data, bool blocked);
63924 };
63925+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63926
63927 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63928 /**
63929diff --git a/include/linux/rio.h b/include/linux/rio.h
63930index 4d50611..c6858a2 100644
63931--- a/include/linux/rio.h
63932+++ b/include/linux/rio.h
63933@@ -315,7 +315,7 @@ struct rio_ops {
63934 int mbox, void *buffer, size_t len);
63935 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63936 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63937-};
63938+} __no_const;
63939
63940 #define RIO_RESOURCE_MEM 0x00000100
63941 #define RIO_RESOURCE_DOORBELL 0x00000200
fe2de317 63942diff --git a/include/linux/rmap.h b/include/linux/rmap.h
5e856224 63943index 1cdd62a..e399f0d 100644
63944--- a/include/linux/rmap.h
63945+++ b/include/linux/rmap.h
5e856224 63946@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63947 void anon_vma_init(void); /* create anon_vma_cachep */
63948 int anon_vma_prepare(struct vm_area_struct *);
63949 void unlink_anon_vmas(struct vm_area_struct *);
63950-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
57199397 63951+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63952 void anon_vma_moveto_tail(struct vm_area_struct *);
63953-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63954+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63955 void __anon_vma_link(struct vm_area_struct *);
57199397 63956
66a7e928 63957 static inline void anon_vma_merge(struct vm_area_struct *vma,
fe2de317 63958diff --git a/include/linux/sched.h b/include/linux/sched.h
5e856224 63959index 0657368..765f70f 100644
63960--- a/include/linux/sched.h
63961+++ b/include/linux/sched.h
4c928ab7 63962@@ -101,6 +101,7 @@ struct bio_list;
58c5fc13 63963 struct fs_struct;
ae4e228f 63964 struct perf_event_context;
66a7e928 63965 struct blk_plug;
63966+struct linux_binprm;
63967
63968 /*
63969 * List of flags we want to share for kernel threads,
5e856224 63970@@ -382,10 +383,13 @@ struct user_namespace;
63971 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63972
63973 extern int sysctl_max_map_count;
63974+extern unsigned long sysctl_heap_stack_gap;
63975
63976 #include <linux/aio.h>
63977
63978 #ifdef CONFIG_MMU
63979+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63980+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63981 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63982 extern unsigned long
63983 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
5e856224 63984@@ -631,6 +635,17 @@ struct signal_struct {
63985 #ifdef CONFIG_TASKSTATS
63986 struct taskstats *stats;
58c5fc13 63987 #endif
16454cff 63988+
63989+#ifdef CONFIG_GRKERNSEC
63990+ u32 curr_ip;
bc901d79 63991+ u32 saved_ip;
63992+ u32 gr_saddr;
63993+ u32 gr_daddr;
63994+ u16 gr_sport;
63995+ u16 gr_dport;
63996+ u8 used_accept:1;
63997+#endif
ae4e228f 63998+
63999 #ifdef CONFIG_AUDIT
64000 unsigned audit_tty;
64001 struct tty_audit_buf *tty_audit_buf;
5e856224 64002@@ -714,6 +729,11 @@ struct user_struct {
64003 struct key *session_keyring; /* UID's default session keyring */
64004 #endif
64005
64006+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64007+ unsigned int banned;
64008+ unsigned long ban_expires;
64009+#endif
64010+
64011 /* Hash table maintenance information */
64012 struct hlist_node uidhash_node;
64013 uid_t uid;
5e856224 64014@@ -1354,8 +1374,8 @@ struct task_struct {
64015 struct list_head thread_group;
64016
64017 struct completion *vfork_done; /* for vfork() */
64018- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
64019- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64020+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
64021+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64022
64023 cputime_t utime, stime, utimescaled, stimescaled;
64024 cputime_t gtime;
5e856224 64025@@ -1371,13 +1391,6 @@ struct task_struct {
64026 struct task_cputime cputime_expires;
64027 struct list_head cpu_timers[3];
64028
64029-/* process credentials */
bc901d79 64030- const struct cred __rcu *real_cred; /* objective and real subjective task
58c5fc13 64031- * credentials (COW) */
bc901d79 64032- const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 64033- * credentials (COW) */
ae4e228f 64034- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64035-
64036 char comm[TASK_COMM_LEN]; /* executable name excluding path
64037 - access with [gs]et_task_comm (which lock
64038 it with task_lock())
5e856224 64039@@ -1394,8 +1407,16 @@ struct task_struct {
64040 #endif
64041 /* CPU-specific state of this task */
bc901d79 64042 struct thread_struct thread;
64043+/* thread_info moved to task_struct */
64044+#ifdef CONFIG_X86
64045+ struct thread_info tinfo;
64046+#endif
64047 /* filesystem information */
64048 struct fs_struct *fs;
58c5fc13 64049+
bc901d79 64050+ const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 64051+ * credentials (COW) */
58c5fc13 64052+
64053 /* open file information */
64054 struct files_struct *files;
64055 /* namespaces */
5e856224 64056@@ -1442,6 +1463,11 @@ struct task_struct {
64057 struct rt_mutex_waiter *pi_blocked_on;
64058 #endif
ae4e228f 64059
64060+/* process credentials */
64061+ const struct cred __rcu *real_cred; /* objective and real subjective task
ae4e228f 64062+ * credentials (COW) */
bc901d79 64063+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
ae4e228f 64064+
64065 #ifdef CONFIG_DEBUG_MUTEXES
64066 /* mutex deadlock detection */
64067 struct mutex_waiter *blocked_on;
5e856224 64068@@ -1558,6 +1584,27 @@ struct task_struct {
64069 unsigned long default_timer_slack_ns;
64070
64071 struct list_head *scm_work_list;
64072+
64073+#ifdef CONFIG_GRKERNSEC
64074+ /* grsecurity */
64075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64076+ u64 exec_id;
64077+#endif
64078+#ifdef CONFIG_GRKERNSEC_SETXID
64079+ const struct cred *delayed_cred;
64080+#endif
df50ba0c 64081+ struct dentry *gr_chroot_dentry;
64082+ struct acl_subject_label *acl;
64083+ struct acl_role_label *role;
64084+ struct file *exec_file;
64085+ u16 acl_role_id;
16454cff 64086+ /* is this the task that authenticated to the special role */
64087+ u8 acl_sp_role;
64088+ u8 is_writable;
64089+ u8 brute;
df50ba0c 64090+ u8 gr_is_chrooted;
64091+#endif
64092+
ae4e228f 64093 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
df50ba0c 64094 /* Index of current stored address in ret_stack */
ae4e228f 64095 int curr_ret_stack;
5e856224 64096@@ -1592,6 +1639,51 @@ struct task_struct {
ae4e228f 64097 #endif
64098 };
64099
64100+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
64101+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
64102+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
64103+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
64104+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
64105+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
64106+
64107+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 64108+extern int pax_softmode;
64109+#endif
64110+
64111+extern int pax_check_flags(unsigned long *);
64112+
64113+/* if tsk != current then task_lock must be held on it */
64114+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64115+static inline unsigned long pax_get_flags(struct task_struct *tsk)
64116+{
64117+ if (likely(tsk->mm))
64118+ return tsk->mm->pax_flags;
64119+ else
64120+ return 0UL;
64121+}
64122+
64123+/* if tsk != current then task_lock must be held on it */
64124+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
64125+{
64126+ if (likely(tsk->mm)) {
64127+ tsk->mm->pax_flags = flags;
64128+ return 0;
64129+ }
64130+ return -EINVAL;
64131+}
64132+#endif
64133+
64134+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64135+extern void pax_set_initial_flags(struct linux_binprm *bprm);
64136+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64137+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
64138+#endif
64139+
15a11c5b 64140+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
6e9df6a3 64141+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
15a11c5b 64142+extern void pax_report_refcount_overflow(struct pt_regs *regs);
4c928ab7 64143+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
64144+
64145 /* Future-safe accessor for struct task_struct's cpus_allowed. */
ae4e228f 64146 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
58c5fc13 64147
5e856224 64148@@ -2104,7 +2196,9 @@ void yield(void);
64149 extern struct exec_domain default_exec_domain;
64150
64151 union thread_union {
64152+#ifndef CONFIG_X86
64153 struct thread_info thread_info;
64154+#endif
64155 unsigned long stack[THREAD_SIZE/sizeof(long)];
64156 };
64157
5e856224 64158@@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
64159 */
64160
64161 extern struct task_struct *find_task_by_vpid(pid_t nr);
64162+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
64163 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
64164 struct pid_namespace *ns);
64165
5e856224 64166@@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
64167 extern void exit_itimers(struct signal_struct *);
64168 extern void flush_itimer_signals(void);
64169
5e856224 64170-extern void do_group_exit(int);
4c928ab7 64171+extern __noreturn void do_group_exit(int);
64172
64173 extern void daemonize(const char *, ...);
64174 extern int allow_signal(int);
5e856224 64175@@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
64176
64177 #endif
64178
64179-static inline int object_is_on_stack(void *obj)
ae4e228f 64180+static inline int object_starts_on_stack(void *obj)
58c5fc13 64181 {
64182- void *stack = task_stack_page(current);
64183+ const void *stack = task_stack_page(current);
58c5fc13 64184
64185 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64186 }
64187
64188+#ifdef CONFIG_PAX_USERCOPY
64189+extern int object_is_on_stack(const void *obj, unsigned long len);
64190+#endif
64191+
64192 extern void thread_info_cache_init(void);
64193
64194 #ifdef CONFIG_DEBUG_STACK_USAGE
64195diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
64196index 899fbb4..1cb4138 100644
64197--- a/include/linux/screen_info.h
64198+++ b/include/linux/screen_info.h
ae4e228f 64199@@ -43,7 +43,8 @@ struct screen_info {
64200 __u16 pages; /* 0x32 */
64201 __u16 vesa_attributes; /* 0x34 */
64202 __u32 capabilities; /* 0x36 */
64203- __u8 _reserved[6]; /* 0x3a */
64204+ __u16 vesapm_size; /* 0x3a */
64205+ __u8 _reserved[4]; /* 0x3c */
64206 } __attribute__((packed));
64207
64208 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
fe2de317 64209diff --git a/include/linux/security.h b/include/linux/security.h
5e856224 64210index 83c18e8..2d98860 100644
64211--- a/include/linux/security.h
64212+++ b/include/linux/security.h
4c928ab7 64213@@ -37,6 +37,7 @@
58c5fc13 64214 #include <linux/xfrm.h>
df50ba0c 64215 #include <linux/slab.h>
4c928ab7 64216 #include <linux/xattr.h>
64217+#include <linux/grsecurity.h>
64218 #include <net/flow.h>
64219
64220 /* Maximum number of letters for an LSM name string */
fe2de317 64221diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
5e856224 64222index 44f1514..2bbf6c1 100644
64223--- a/include/linux/seq_file.h
64224+++ b/include/linux/seq_file.h
64225@@ -24,6 +24,9 @@ struct seq_file {
64226 struct mutex lock;
64227 const struct seq_operations *op;
64228 int poll_event;
64229+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64230+ u64 exec_id;
64231+#endif
64232 void *private;
64233 };
64234
64235@@ -33,6 +36,7 @@ struct seq_operations {
15a11c5b
MT
64236 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64237 int (*show) (struct seq_file *m, void *v);
64238 };
64239+typedef struct seq_operations __no_const seq_operations_no_const;
64240
64241 #define SEQ_SKIP 1
64242
64243diff --git a/include/linux/shm.h b/include/linux/shm.h
64244index 92808b8..c28cac4 100644
64245--- a/include/linux/shm.h
64246+++ b/include/linux/shm.h
64247@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
15a11c5b 64248
64249 /* The task created the shm object. NULL if the task is dead. */
64250 struct task_struct *shm_creator;
64251+#ifdef CONFIG_GRKERNSEC
64252+ time_t shm_createtime;
64253+ pid_t shm_lapid;
64254+#endif
64255 };
64256
64257 /* shm_mode upper byte flags */
fe2de317 64258diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5e856224 64259index 42854ce..3b7d3c8 100644
64260--- a/include/linux/skbuff.h
64261+++ b/include/linux/skbuff.h
5e856224 64262@@ -655,7 +655,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
64263 */
64264 static inline int skb_queue_empty(const struct sk_buff_head *list)
64265 {
64266- return list->next == (struct sk_buff *)list;
64267+ return list->next == (const struct sk_buff *)list;
64268 }
64269
64270 /**
5e856224 64271@@ -668,7 +668,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
64272 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64273 const struct sk_buff *skb)
64274 {
64275- return skb->next == (struct sk_buff *)list;
64276+ return skb->next == (const struct sk_buff *)list;
64277 }
64278
64279 /**
5e856224 64280@@ -681,7 +681,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64281 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64282 const struct sk_buff *skb)
64283 {
64284- return skb->prev == (struct sk_buff *)list;
64285+ return skb->prev == (const struct sk_buff *)list;
64286 }
64287
64288 /**
5e856224 64289@@ -1558,7 +1558,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
64290 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
64291 */
64292 #ifndef NET_SKB_PAD
64293-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
15a11c5b 64294+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
64295 #endif
64296
64297 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
fe2de317 64298diff --git a/include/linux/slab.h b/include/linux/slab.h
4c928ab7 64299index 573c809..07e1f43 100644
64300--- a/include/linux/slab.h
64301+++ b/include/linux/slab.h
71d190be 64302@@ -11,12 +11,20 @@
64303
64304 #include <linux/gfp.h>
64305 #include <linux/types.h>
64306+#include <linux/err.h>
64307
64308 /*
64309 * Flags to pass to kmem_cache_create().
71d190be
MT
64310 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64311 */
64312 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64313+
64314+#ifdef CONFIG_PAX_USERCOPY
64315+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64316+#else
64317+#define SLAB_USERCOPY 0x00000000UL
64318+#endif
64319+
64320 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64321 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64322 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64323@@ -87,10 +95,13 @@
64324 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64325 * Both make kfree a no-op.
64326 */
64327-#define ZERO_SIZE_PTR ((void *)16)
64328+#define ZERO_SIZE_PTR \
64329+({ \
64330+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64331+ (void *)(-MAX_ERRNO-1L); \
64332+})
64333
64334-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64335- (unsigned long)ZERO_SIZE_PTR)
df50ba0c 64336+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64337
64338 /*
64339 * struct kmem_cache related prototypes
64340@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
64341 /*
64342 * Common kmalloc functions provided by all allocators
64343 */
64344-void * __must_check __krealloc(const void *, size_t, gfp_t);
64345-void * __must_check krealloc(const void *, size_t, gfp_t);
64346+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
64347+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
64348 void kfree(const void *);
64349 void kzfree(const void *);
64350 size_t ksize(const void *);
64351+void check_object_size(const void *ptr, unsigned long n, bool to);
64352
64353 /*
64354 * Allocator specific definitions. These are mainly used to establish optimized
64355@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64356 */
64357 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64358 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64359-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64360+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
64361 #define kmalloc_track_caller(size, flags) \
64362 __kmalloc_track_caller(size, flags, _RET_IP_)
64363 #else
64364@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64365 */
64366 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64367 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64368-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
64369+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
64370 #define kmalloc_node_track_caller(size, flags, node) \
64371 __kmalloc_node_track_caller(size, flags, node, \
64372 _RET_IP_)
fe2de317 64373diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
5e856224 64374index fbd1117..c0bd874 100644
64375--- a/include/linux/slab_def.h
64376+++ b/include/linux/slab_def.h
5e856224 64377@@ -66,10 +66,10 @@ struct kmem_cache {
64378 unsigned long node_allocs;
64379 unsigned long node_frees;
64380 unsigned long node_overflow;
64381- atomic_t allochit;
64382- atomic_t allocmiss;
64383- atomic_t freehit;
64384- atomic_t freemiss;
64385+ atomic_unchecked_t allochit;
64386+ atomic_unchecked_t allocmiss;
64387+ atomic_unchecked_t freehit;
64388+ atomic_unchecked_t freemiss;
64389
64390 /*
64391 * If debugging is enabled, then the allocator can add additional
5e856224 64392@@ -107,7 +107,7 @@ struct cache_sizes {
64393 extern struct cache_sizes malloc_sizes[];
64394
64395 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64396-void *__kmalloc(size_t size, gfp_t flags);
64397+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64398
64399 #ifdef CONFIG_TRACING
64400 extern void *kmem_cache_alloc_trace(size_t size,
5e856224 64401@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
64402 }
64403 #endif
64404
64405+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64406 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64407 {
64408 struct kmem_cache *cachep;
5e856224 64409@@ -160,7 +161,7 @@ found:
64410 }
64411
64412 #ifdef CONFIG_NUMA
64413-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64414+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64415 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64416
64417 #ifdef CONFIG_TRACING
5e856224 64418@@ -179,6 +180,7 @@ kmem_cache_alloc_node_trace(size_t size,
64419 }
64420 #endif
64421
64422+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64423 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64424 {
64425 struct kmem_cache *cachep;
64426diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64427index 0ec00b3..65e7e0e 100644
64428--- a/include/linux/slob_def.h
64429+++ b/include/linux/slob_def.h
64430@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64431 return kmem_cache_alloc_node(cachep, flags, -1);
64432 }
64433
64434-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64435+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64436
64437+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64438 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64439 {
64440 return __kmalloc_node(size, flags, node);
64441@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64442 * kmalloc is the normal method of allocating memory
64443 * in the kernel.
64444 */
64445+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64446 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64447 {
64448 return __kmalloc_node(size, flags, -1);
64449 }
64450
64451+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64452 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64453 {
64454 return kmalloc(size, flags);
fe2de317 64455diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
4c928ab7 64456index a32bcfd..d26bd6e 100644
64457--- a/include/linux/slub_def.h
64458+++ b/include/linux/slub_def.h
4c928ab7 64459@@ -89,7 +89,7 @@ struct kmem_cache {
64460 struct kmem_cache_order_objects max;
64461 struct kmem_cache_order_objects min;
64462 gfp_t allocflags; /* gfp flags to use on each alloc */
64463- int refcount; /* Refcount for slab cache destroy */
64464+ atomic_t refcount; /* Refcount for slab cache destroy */
64465 void (*ctor)(void *);
64466 int inuse; /* Offset to metadata */
64467 int align; /* Alignment */
64468@@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size)
64469 * This ought to end up with a global pointer to the right cache
64470 * in kmalloc_caches.
64471 */
64472+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
64473 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64474 {
64475 int index = kmalloc_index(size);
64476@@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64477 }
64478
64479 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64480-void *__kmalloc(size_t size, gfp_t flags);
4c928ab7 64481+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64482
64483 static __always_inline void *
64484+kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
64485+static __always_inline void *
15a11c5b 64486 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64487 {
64488 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
64489@@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64490 }
64491 #endif
64492
64493+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
64494 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64495 {
64496 unsigned int order = get_order(size);
64497 return kmalloc_order_trace(size, flags, order);
64498 }
64499
64500+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64501 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64502 {
64503 if (__builtin_constant_p(size)) {
64504@@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64505 }
64506
64507 #ifdef CONFIG_NUMA
64508-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64509+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64510 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64511
64512 #ifdef CONFIG_TRACING
64513@@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
64514 }
64515 #endif
64516
64517+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64519 {
64520 if (__builtin_constant_p(size) &&
64521diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64522index de8832d..0147b46 100644
64523--- a/include/linux/sonet.h
64524+++ b/include/linux/sonet.h
58c5fc13 64525@@ -61,7 +61,7 @@ struct sonet_stats {
6e9df6a3 64526 #include <linux/atomic.h>
64527
64528 struct k_sonet_stats {
64529-#define __HANDLE_ITEM(i) atomic_t i
64530+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64531 __SONET_ITEMS
64532 #undef __HANDLE_ITEM
64533 };
64534diff --git a/include/linux/stddef.h b/include/linux/stddef.h
64535index 6a40c76..1747b67 100644
64536--- a/include/linux/stddef.h
64537+++ b/include/linux/stddef.h
64538@@ -3,14 +3,10 @@
64539
64540 #include <linux/compiler.h>
64541
64542+#ifdef __KERNEL__
64543+
64544 #undef NULL
64545-#if defined(__cplusplus)
64546-#define NULL 0
64547-#else
64548 #define NULL ((void *)0)
64549-#endif
64550-
64551-#ifdef __KERNEL__
64552
64553 enum {
64554 false = 0,
fe2de317 64555diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
5e856224 64556index 2c5993a..b0e79f0 100644
fe2de317
MT
64557--- a/include/linux/sunrpc/clnt.h
64558+++ b/include/linux/sunrpc/clnt.h
4c928ab7 64559@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
bc901d79
MT
64560 {
64561 switch (sap->sa_family) {
64562 case AF_INET:
64563- return ntohs(((struct sockaddr_in *)sap)->sin_port);
64564+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64565 case AF_INET6:
64566- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64567+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64568 }
64569 return 0;
64570 }
4c928ab7 64571@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64572 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64573 const struct sockaddr *src)
64574 {
64575- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64576+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64577 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64578
64579 dsin->sin_family = ssin->sin_family;
4c928ab7 64580@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64581 if (sa->sa_family != AF_INET6)
64582 return 0;
64583
64584- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64585+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64586 }
64587
64588 #endif /* __KERNEL__ */
64589diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64590index e775689..9e206d9 100644
64591--- a/include/linux/sunrpc/sched.h
64592+++ b/include/linux/sunrpc/sched.h
64593@@ -105,6 +105,7 @@ struct rpc_call_ops {
64594 void (*rpc_call_done)(struct rpc_task *, void *);
64595 void (*rpc_release)(void *);
64596 };
64597+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64598
64599 struct rpc_task_setup {
64600 struct rpc_task *task;
64601diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64602index c14fe86..393245e 100644
64603--- a/include/linux/sunrpc/svc_rdma.h
64604+++ b/include/linux/sunrpc/svc_rdma.h
64605@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64606 extern unsigned int svcrdma_max_requests;
64607 extern unsigned int svcrdma_max_req_size;
64608
64609-extern atomic_t rdma_stat_recv;
64610-extern atomic_t rdma_stat_read;
64611-extern atomic_t rdma_stat_write;
64612-extern atomic_t rdma_stat_sq_starve;
64613-extern atomic_t rdma_stat_rq_starve;
64614-extern atomic_t rdma_stat_rq_poll;
64615-extern atomic_t rdma_stat_rq_prod;
64616-extern atomic_t rdma_stat_sq_poll;
64617-extern atomic_t rdma_stat_sq_prod;
64618+extern atomic_unchecked_t rdma_stat_recv;
64619+extern atomic_unchecked_t rdma_stat_read;
64620+extern atomic_unchecked_t rdma_stat_write;
64621+extern atomic_unchecked_t rdma_stat_sq_starve;
64622+extern atomic_unchecked_t rdma_stat_rq_starve;
64623+extern atomic_unchecked_t rdma_stat_rq_poll;
64624+extern atomic_unchecked_t rdma_stat_rq_prod;
64625+extern atomic_unchecked_t rdma_stat_sq_poll;
64626+extern atomic_unchecked_t rdma_stat_sq_prod;
64627
64628 #define RPCRDMA_VERSION 1
64629
fe2de317 64630diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
5e856224 64631index bb9127d..34ab358 100644
64632--- a/include/linux/sysctl.h
64633+++ b/include/linux/sysctl.h
ae4e228f 64634@@ -155,7 +155,11 @@ enum
64635 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64636 };
64637
64638-
64639+#ifdef CONFIG_PAX_SOFTMODE
64640+enum {
64641+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64642+};
64643+#endif
64644
64645 /* CTL_VM names: */
64646 enum
4c928ab7 64647@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64648
64649 extern int proc_dostring(struct ctl_table *, int,
64650 void __user *, size_t *, loff_t *);
64651+extern int proc_dostring_modpriv(struct ctl_table *, int,
64652+ void __user *, size_t *, loff_t *);
64653 extern int proc_dointvec(struct ctl_table *, int,
64654 void __user *, size_t *, loff_t *);
64655 extern int proc_dointvec_minmax(struct ctl_table *, int,
64656diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
64657index a71a292..51bd91d 100644
64658--- a/include/linux/tracehook.h
64659+++ b/include/linux/tracehook.h
64660@@ -54,12 +54,12 @@ struct linux_binprm;
64661 /*
64662 * ptrace report for syscall entry and exit looks identical.
64663 */
64664-static inline void ptrace_report_syscall(struct pt_regs *regs)
64665+static inline int ptrace_report_syscall(struct pt_regs *regs)
64666 {
64667 int ptrace = current->ptrace;
64668
64669 if (!(ptrace & PT_PTRACED))
64670- return;
64671+ return 0;
64672
64673 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
64674
64675@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64676 send_sig(current->exit_code, current, 1);
64677 current->exit_code = 0;
64678 }
64679+
64680+ return fatal_signal_pending(current);
64681 }
64682
64683 /**
64684@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64685 static inline __must_check int tracehook_report_syscall_entry(
64686 struct pt_regs *regs)
64687 {
64688- ptrace_report_syscall(regs);
64689- return 0;
64690+ return ptrace_report_syscall(regs);
64691 }
64692
64693 /**
fe2de317
MT
64694diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64695index ff7dc08..893e1bd 100644
64696--- a/include/linux/tty_ldisc.h
64697+++ b/include/linux/tty_ldisc.h
16454cff 64698@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
58c5fc13
MT
64699
64700 struct module *owner;
64701
64702- int refcount;
64703+ atomic_t refcount;
64704 };
64705
64706 struct tty_ldisc {
fe2de317 64707diff --git a/include/linux/types.h b/include/linux/types.h
5e856224 64708index e5fa503..df6e8a4 100644
fe2de317
MT
64709--- a/include/linux/types.h
64710+++ b/include/linux/types.h
5e856224 64711@@ -214,10 +214,26 @@ typedef struct {
57199397 64712 int counter;
58c5fc13
MT
64713 } atomic_t;
64714
64715+#ifdef CONFIG_PAX_REFCOUNT
64716+typedef struct {
57199397 64717+ int counter;
58c5fc13
MT
64718+} atomic_unchecked_t;
64719+#else
64720+typedef atomic_t atomic_unchecked_t;
64721+#endif
64722+
64723 #ifdef CONFIG_64BIT
64724 typedef struct {
57199397 64725 long counter;
58c5fc13
MT
64726 } atomic64_t;
64727+
64728+#ifdef CONFIG_PAX_REFCOUNT
64729+typedef struct {
57199397 64730+ long counter;
58c5fc13
MT
64731+} atomic64_unchecked_t;
64732+#else
64733+typedef atomic64_t atomic64_unchecked_t;
64734+#endif
64735 #endif
64736
6892158b 64737 struct list_head {
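A minimal sketch of how the new type pair is used in the rest of this patch (counter names below are hypothetical; atomic_read_unchecked() is the accessor the later hunks rely on): a real reference count stays atomic_t and remains subject to the CONFIG_PAX_REFCOUNT overflow checks, while statistics-style counters whose wrap-around is harmless are declared atomic_unchecked_t so they can be left un-instrumented.

static atomic_t obj_refs = ATOMIC_INIT(1);             /* real refcount: overflow should trap */
static atomic_unchecked_t rx_frames = ATOMIC_INIT(0);  /* statistic: wrap-around is harmless  */

static inline int rx_frames_snapshot(void)
{
	return atomic_read_unchecked(&rx_frames);       /* the _unchecked accessors mirror the plain ones */
}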
fe2de317 64738diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
4c928ab7 64739index 5ca0951..53a2fff 100644
fe2de317
MT
64740--- a/include/linux/uaccess.h
64741+++ b/include/linux/uaccess.h
64742@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
58c5fc13
MT
64743 long ret; \
64744 mm_segment_t old_fs = get_fs(); \
64745 \
64746- set_fs(KERNEL_DS); \
64747 pagefault_disable(); \
6e9df6a3 64748- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58c5fc13 64749- pagefault_enable(); \
6e9df6a3
MT
64750+ set_fs(KERNEL_DS); \
64751+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
58c5fc13
MT
64752 set_fs(old_fs); \
64753+ pagefault_enable(); \
64754 ret; \
64755 })
64756
4c928ab7
MT
64757@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
64758 * Safely write to address @dst from the buffer at @src. If a kernel fault
64759 * happens, handle that and return -EFAULT.
64760 */
64761-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
64762+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
64763 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
64764
64765 #endif /* __LINUX_UACCESS_H__ */
fe2de317
MT
64766diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64767index 99c1b4d..bb94261 100644
64768--- a/include/linux/unaligned/access_ok.h
64769+++ b/include/linux/unaligned/access_ok.h
bc901d79
MT
64770@@ -6,32 +6,32 @@
64771
64772 static inline u16 get_unaligned_le16(const void *p)
64773 {
64774- return le16_to_cpup((__le16 *)p);
64775+ return le16_to_cpup((const __le16 *)p);
64776 }
64777
64778 static inline u32 get_unaligned_le32(const void *p)
64779 {
64780- return le32_to_cpup((__le32 *)p);
64781+ return le32_to_cpup((const __le32 *)p);
64782 }
64783
64784 static inline u64 get_unaligned_le64(const void *p)
64785 {
64786- return le64_to_cpup((__le64 *)p);
64787+ return le64_to_cpup((const __le64 *)p);
64788 }
64789
64790 static inline u16 get_unaligned_be16(const void *p)
64791 {
64792- return be16_to_cpup((__be16 *)p);
64793+ return be16_to_cpup((const __be16 *)p);
64794 }
64795
64796 static inline u32 get_unaligned_be32(const void *p)
64797 {
64798- return be32_to_cpup((__be32 *)p);
64799+ return be32_to_cpup((const __be32 *)p);
64800 }
64801
64802 static inline u64 get_unaligned_be64(const void *p)
64803 {
64804- return be64_to_cpup((__be64 *)p);
64805+ return be64_to_cpup((const __be64 *)p);
64806 }
64807
64808 static inline void put_unaligned_le16(u16 val, void *p)
4c928ab7 64809diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
5e856224 64810index 0d3f988..000f101 100644
4c928ab7
MT
64811--- a/include/linux/usb/renesas_usbhs.h
64812+++ b/include/linux/usb/renesas_usbhs.h
64813@@ -39,7 +39,7 @@ enum {
64814 */
64815 struct renesas_usbhs_driver_callback {
64816 int (*notify_hotplug)(struct platform_device *pdev);
64817-};
64818+} __no_const;
64819
64820 /*
64821 * callback functions for platform
5e856224 64822@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
4c928ab7
MT
64823 * VBUS control is needed for Host
64824 */
64825 int (*set_vbus)(struct platform_device *pdev, int enable);
64826-};
64827+} __no_const;
64828
64829 /*
64830 * parameters for renesas usbhs
fe2de317 64831diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
4c928ab7 64832index 6f8fbcf..8259001 100644
fe2de317
MT
64833--- a/include/linux/vermagic.h
64834+++ b/include/linux/vermagic.h
4c928ab7 64835@@ -25,9 +25,35 @@
6e9df6a3
MT
64836 #define MODULE_ARCH_VERMAGIC ""
64837 #endif
64838
64839+#ifdef CONFIG_PAX_REFCOUNT
64840+#define MODULE_PAX_REFCOUNT "REFCOUNT "
64841+#else
64842+#define MODULE_PAX_REFCOUNT ""
64843+#endif
64844+
64845+#ifdef CONSTIFY_PLUGIN
64846+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64847+#else
64848+#define MODULE_CONSTIFY_PLUGIN ""
64849+#endif
64850+
64851+#ifdef STACKLEAK_PLUGIN
64852+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64853+#else
64854+#define MODULE_STACKLEAK_PLUGIN ""
64855+#endif
64856+
64857+#ifdef CONFIG_GRKERNSEC
64858+#define MODULE_GRSEC "GRSEC "
64859+#else
64860+#define MODULE_GRSEC ""
64861+#endif
64862+
64863 #define VERMAGIC_STRING \
64864 UTS_RELEASE " " \
64865 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64866 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64867- MODULE_ARCH_VERMAGIC
64868+ MODULE_ARCH_VERMAGIC \
64869+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64870+ MODULE_GRSEC
64871
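With the extra macros in place, the module version magic records the hardening configuration as well. An illustrative expansion (the exact string depends on the configuration; this assumes an SMP build with module unloading, PAX_REFCOUNT, the constify plugin and GRKERNSEC enabled):

/* VERMAGIC_STRING would then read roughly:
 *   "3.3.6 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC "
 * so a module built without the matching hardening options fails the vermagic
 * check at load time instead of running against an incompatible kernel.
 */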
fe2de317 64872diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
5e856224 64873index dcdfc2b..cce598d 100644
fe2de317
MT
64874--- a/include/linux/vmalloc.h
64875+++ b/include/linux/vmalloc.h
64876@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
58c5fc13
MT
64877 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64878 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
6e9df6a3 64879 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
58c5fc13 64880+
df50ba0c 64881+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
6e9df6a3 64882+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
58c5fc13
MT
64883+#endif
64884+
64885 /* bits [20..32] reserved for arch specific ioremap internals */
64886
64887 /*
4c928ab7
MT
64888@@ -51,18 +56,18 @@ static inline void vmalloc_init(void)
64889 }
bc901d79 64890 #endif
58c5fc13 64891
4c928ab7
MT
64892-extern void *vmalloc(unsigned long size);
64893-extern void *vzalloc(unsigned long size);
64894-extern void *vmalloc_user(unsigned long size);
64895-extern void *vmalloc_node(unsigned long size, int node);
64896-extern void *vzalloc_node(unsigned long size, int node);
64897-extern void *vmalloc_exec(unsigned long size);
64898-extern void *vmalloc_32(unsigned long size);
64899-extern void *vmalloc_32_user(unsigned long size);
64900-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64901+extern void *vmalloc(unsigned long size) __size_overflow(1);
64902+extern void *vzalloc(unsigned long size) __size_overflow(1);
64903+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
64904+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
64905+extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1);
64906+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
64907+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
64908+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
64909+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
64910 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64911 unsigned long start, unsigned long end, gfp_t gfp_mask,
64912- pgprot_t prot, int node, void *caller);
64913+ pgprot_t prot, int node, void *caller) __size_overflow(1);
64914 extern void vfree(const void *addr);
64915
64916 extern void *vmap(struct page **pages, unsigned int count,
64917@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64918 extern void free_vm_area(struct vm_struct *area);
64919
64920 /* for /dev/kmem */
64921-extern long vread(char *buf, char *addr, unsigned long count);
64922-extern long vwrite(char *buf, char *addr, unsigned long count);
64923+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64924+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64925
64926 /*
64927 * Internals. Dont't use..
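The __size_overflow(1) annotations above presumably mark the first argument as a size expression for the size_overflow gcc plugin to instrument; a hypothetical local wrapper would be annotated the same way (names below are illustrative only):

/* sketch: argument 2 is the size the plugin should track */
extern void *ring_buffer_alloc_bytes(int node, unsigned long size) __size_overflow(2);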
fe2de317
MT
64928diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64929index 65efb92..137adbb 100644
64930--- a/include/linux/vmstat.h
64931+++ b/include/linux/vmstat.h
64932@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
57199397
MT
64933 /*
64934 * Zone based page accounting with per cpu differentials.
64935 */
64936-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64937+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64938
64939 static inline void zone_page_state_add(long x, struct zone *zone,
64940 enum zone_stat_item item)
64941 {
64942- atomic_long_add(x, &zone->vm_stat[item]);
64943- atomic_long_add(x, &vm_stat[item]);
64944+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64945+ atomic_long_add_unchecked(x, &vm_stat[item]);
64946 }
64947
64948 static inline unsigned long global_page_state(enum zone_stat_item item)
64949 {
64950- long x = atomic_long_read(&vm_stat[item]);
64951+ long x = atomic_long_read_unchecked(&vm_stat[item]);
64952 #ifdef CONFIG_SMP
64953 if (x < 0)
64954 x = 0;
fe2de317 64955@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
57199397
MT
64956 static inline unsigned long zone_page_state(struct zone *zone,
64957 enum zone_stat_item item)
64958 {
64959- long x = atomic_long_read(&zone->vm_stat[item]);
64960+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64961 #ifdef CONFIG_SMP
64962 if (x < 0)
64963 x = 0;
fe2de317 64964@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
6892158b
MT
64965 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64966 enum zone_stat_item item)
64967 {
64968- long x = atomic_long_read(&zone->vm_stat[item]);
64969+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64970
64971 #ifdef CONFIG_SMP
64972 int cpu;
fe2de317 64973@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
57199397
MT
64974
64975 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64976 {
64977- atomic_long_inc(&zone->vm_stat[item]);
64978- atomic_long_inc(&vm_stat[item]);
64979+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
64980+ atomic_long_inc_unchecked(&vm_stat[item]);
64981 }
64982
64983 static inline void __inc_zone_page_state(struct page *page,
fe2de317 64984@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
57199397
MT
64985
64986 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64987 {
64988- atomic_long_dec(&zone->vm_stat[item]);
64989- atomic_long_dec(&vm_stat[item]);
64990+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
64991+ atomic_long_dec_unchecked(&vm_stat[item]);
64992 }
64993
64994 static inline void __dec_zone_page_state(struct page *page,
4c928ab7
MT
64995diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64996index e5d1220..ef6e406 100644
64997--- a/include/linux/xattr.h
64998+++ b/include/linux/xattr.h
64999@@ -57,6 +57,11 @@
65000 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
65001 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
65002
65003+/* User namespace */
65004+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
65005+#define XATTR_PAX_FLAGS_SUFFIX "flags"
65006+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
65007+
65008 #ifdef __KERNEL__
65009
65010 #include <linux/types.h>
fe2de317
MT
65011diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
65012index 4aeff96..b378cdc 100644
65013--- a/include/media/saa7146_vv.h
65014+++ b/include/media/saa7146_vv.h
15a11c5b
MT
65015@@ -163,7 +163,7 @@ struct saa7146_ext_vv
65016 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
65017
65018 /* the extension can override this */
65019- struct v4l2_ioctl_ops ops;
65020+ v4l2_ioctl_ops_no_const ops;
65021 /* pointer to the saa7146 core ops */
65022 const struct v4l2_ioctl_ops *core_ops;
65023
fe2de317
MT
65024diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
65025index c7c40f1..4f01585 100644
65026--- a/include/media/v4l2-dev.h
65027+++ b/include/media/v4l2-dev.h
65028@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
6e9df6a3
MT
65029
65030
65031 struct v4l2_file_operations {
65032- struct module *owner;
65033+ struct module * const owner;
65034 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
65035 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
65036 unsigned int (*poll) (struct file *, struct poll_table_struct *);
65037@@ -68,6 +68,7 @@ struct v4l2_file_operations {
65038 int (*open) (struct file *);
65039 int (*release) (struct file *);
65040 };
65041+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
65042
65043 /*
65044 * Newer version of video_device, handled by videodev2.c
fe2de317 65045diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
5e856224 65046index 3f5d60f..44210ed 100644
fe2de317
MT
65047--- a/include/media/v4l2-ioctl.h
65048+++ b/include/media/v4l2-ioctl.h
5e856224 65049@@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
15a11c5b
MT
65050 long (*vidioc_default) (struct file *file, void *fh,
65051 bool valid_prio, int cmd, void *arg);
65052 };
6e9df6a3 65053-
15a11c5b
MT
65054+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
65055
15a11c5b 65056 /* v4l debugging and diagnostics */
6e9df6a3 65057
fe2de317 65058diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
4c928ab7 65059index 8d55251..dfe5b0a 100644
fe2de317
MT
65060--- a/include/net/caif/caif_hsi.h
65061+++ b/include/net/caif/caif_hsi.h
4c928ab7 65062@@ -98,7 +98,7 @@ struct cfhsi_drv {
6e9df6a3
MT
65063 void (*rx_done_cb) (struct cfhsi_drv *drv);
65064 void (*wake_up_cb) (struct cfhsi_drv *drv);
65065 void (*wake_down_cb) (struct cfhsi_drv *drv);
65066-};
65067+} __no_const;
65068
65069 /* Structure implemented by HSI device. */
65070 struct cfhsi_dev {
fe2de317
MT
65071diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
65072index 9e5425b..8136ffc 100644
65073--- a/include/net/caif/cfctrl.h
65074+++ b/include/net/caif/cfctrl.h
15a11c5b
MT
65075@@ -52,7 +52,7 @@ struct cfctrl_rsp {
65076 void (*radioset_rsp)(void);
65077 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
65078 struct cflayer *client_layer);
65079-};
65080+} __no_const;
65081
65082 /* Link Setup Parameters for CAIF-Links. */
65083 struct cfctrl_link_param {
8308f9c9
MT
65084@@ -101,8 +101,8 @@ struct cfctrl_request_info {
65085 struct cfctrl {
65086 struct cfsrvl serv;
65087 struct cfctrl_rsp res;
65088- atomic_t req_seq_no;
65089- atomic_t rsp_seq_no;
65090+ atomic_unchecked_t req_seq_no;
65091+ atomic_unchecked_t rsp_seq_no;
65092 struct list_head list;
65093 /* Protects from simultaneous access to first_req list */
65094 spinlock_t info_list_lock;
fe2de317 65095diff --git a/include/net/flow.h b/include/net/flow.h
5e856224 65096index 6c469db..7743b8e 100644
fe2de317
MT
65097--- a/include/net/flow.h
65098+++ b/include/net/flow.h
5e856224 65099@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
8308f9c9
MT
65100
65101 extern void flow_cache_flush(void);
4c928ab7 65102 extern void flow_cache_flush_deferred(void);
8308f9c9
MT
65103-extern atomic_t flow_cache_genid;
65104+extern atomic_unchecked_t flow_cache_genid;
65105
66a7e928 65106 #endif
fe2de317 65107diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
5e856224 65108index b94765e..053f68b 100644
fe2de317
MT
65109--- a/include/net/inetpeer.h
65110+++ b/include/net/inetpeer.h
65111@@ -48,8 +48,8 @@ struct inet_peer {
6892158b
MT
65112 */
65113 union {
65114 struct {
66a7e928
MT
65115- atomic_t rid; /* Frag reception counter */
65116- atomic_t ip_id_count; /* IP ID for the next packet */
65117+ atomic_unchecked_t rid; /* Frag reception counter */
65118+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
65119 __u32 tcp_ts;
65120 __u32 tcp_ts_stamp;
6e9df6a3 65121 };
5e856224 65122@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
6892158b
MT
65123 more++;
65124 inet_peer_refcheck(p);
6e9df6a3
MT
65125 do {
65126- old = atomic_read(&p->ip_id_count);
65127+ old = atomic_read_unchecked(&p->ip_id_count);
65128 new = old + more;
65129 if (!new)
65130 new = 1;
65131- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
65132+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
65133 return new;
65134 }
65135
fe2de317
MT
65136diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
65137index 10422ef..662570f 100644
65138--- a/include/net/ip_fib.h
65139+++ b/include/net/ip_fib.h
65140@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66a7e928
MT
65141
65142 #define FIB_RES_SADDR(net, res) \
65143 ((FIB_RES_NH(res).nh_saddr_genid == \
65144- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
65145+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
65146 FIB_RES_NH(res).nh_saddr : \
65147 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
65148 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
fe2de317 65149diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
5e856224 65150index ebe517f..1bd286b 100644
fe2de317
MT
65151--- a/include/net/ip_vs.h
65152+++ b/include/net/ip_vs.h
15a11c5b 65153@@ -509,7 +509,7 @@ struct ip_vs_conn {
8308f9c9
MT
65154 struct ip_vs_conn *control; /* Master control connection */
65155 atomic_t n_control; /* Number of controlled ones */
65156 struct ip_vs_dest *dest; /* real server */
65157- atomic_t in_pkts; /* incoming packet counter */
65158+ atomic_unchecked_t in_pkts; /* incoming packet counter */
65159
65160 /* packet transmitter for different forwarding methods. If it
65161 mangles the packet, it must return NF_DROP or better NF_STOLEN,
15a11c5b 65162@@ -647,7 +647,7 @@ struct ip_vs_dest {
8308f9c9 65163 __be16 port; /* port number of the server */
66a7e928 65164 union nf_inet_addr addr; /* IP address of the server */
8308f9c9
MT
65165 volatile unsigned flags; /* dest status flags */
65166- atomic_t conn_flags; /* flags to copy to conn */
65167+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
65168 atomic_t weight; /* server weight */
65169
65170 atomic_t refcnt; /* reference counter */
fe2de317
MT
65171diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
65172index 69b610a..fe3962c 100644
65173--- a/include/net/irda/ircomm_core.h
65174+++ b/include/net/irda/ircomm_core.h
15a11c5b
MT
65175@@ -51,7 +51,7 @@ typedef struct {
65176 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
65177 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
65178 struct ircomm_info *);
65179-} call_t;
65180+} __no_const call_t;
65181
65182 struct ircomm_cb {
65183 irda_queue_t queue;
fe2de317
MT
65184diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
65185index 59ba38bc..d515662 100644
65186--- a/include/net/irda/ircomm_tty.h
65187+++ b/include/net/irda/ircomm_tty.h
c52201e0
MT
65188@@ -35,6 +35,7 @@
65189 #include <linux/termios.h>
65190 #include <linux/timer.h>
65191 #include <linux/tty.h> /* struct tty_struct */
65192+#include <asm/local.h>
65193
65194 #include <net/irda/irias_object.h>
65195 #include <net/irda/ircomm_core.h>
65196@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58c5fc13
MT
65197 unsigned short close_delay;
65198 unsigned short closing_wait; /* time to wait before closing */
65199
65200- int open_count;
65201- int blocked_open; /* # of blocked opens */
c52201e0
MT
65202+ local_t open_count;
65203+ local_t blocked_open; /* # of blocked opens */
58c5fc13
MT
65204
65205 /* Protect concurent access to :
65206 * o self->open_count
fe2de317 65207diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
5e856224 65208index 0954ec9..7413562 100644
fe2de317
MT
65209--- a/include/net/iucv/af_iucv.h
65210+++ b/include/net/iucv/af_iucv.h
5e856224 65211@@ -138,7 +138,7 @@ struct iucv_sock {
8308f9c9
MT
65212 struct iucv_sock_list {
65213 struct hlist_head head;
65214 rwlock_t lock;
65215- atomic_t autobind_name;
65216+ atomic_unchecked_t autobind_name;
65217 };
65218
65219 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
fe2de317 65220diff --git a/include/net/neighbour.h b/include/net/neighbour.h
5e856224 65221index 34c996f..bb3b4d4 100644
fe2de317
MT
65222--- a/include/net/neighbour.h
65223+++ b/include/net/neighbour.h
5e856224 65224@@ -123,7 +123,7 @@ struct neigh_ops {
6e9df6a3
MT
65225 void (*error_report)(struct neighbour *, struct sk_buff *);
65226 int (*output)(struct neighbour *, struct sk_buff *);
65227 int (*connected_output)(struct neighbour *, struct sk_buff *);
15a11c5b
MT
65228-};
65229+} __do_const;
ae4e228f
MT
65230
65231 struct pneigh_entry {
15a11c5b 65232 struct pneigh_entry *next;
fe2de317 65233diff --git a/include/net/netlink.h b/include/net/netlink.h
4c928ab7 65234index cb1f350..3279d2c 100644
fe2de317
MT
65235--- a/include/net/netlink.h
65236+++ b/include/net/netlink.h
4c928ab7 65237@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
bc901d79
MT
65238 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65239 {
65240 if (mark)
65241- skb_trim(skb, (unsigned char *) mark - skb->data);
65242+ skb_trim(skb, (const unsigned char *) mark - skb->data);
65243 }
65244
65245 /**
fe2de317 65246diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
5e856224 65247index bbd023a..97c6d0d 100644
fe2de317
MT
65248--- a/include/net/netns/ipv4.h
65249+++ b/include/net/netns/ipv4.h
5e856224 65250@@ -57,8 +57,8 @@ struct netns_ipv4 {
15a11c5b 65251 unsigned int sysctl_ping_group_range[2];
5e856224 65252 long sysctl_tcp_mem[3];
8308f9c9
MT
65253
65254- atomic_t rt_genid;
66a7e928 65255- atomic_t dev_addr_genid;
8308f9c9 65256+ atomic_unchecked_t rt_genid;
66a7e928 65257+ atomic_unchecked_t dev_addr_genid;
8308f9c9
MT
65258
65259 #ifdef CONFIG_IP_MROUTE
65260 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
fe2de317 65261diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
5e856224 65262index d368561..96aaa17 100644
fe2de317
MT
65263--- a/include/net/sctp/sctp.h
65264+++ b/include/net/sctp/sctp.h
6e9df6a3 65265@@ -318,9 +318,9 @@ do { \
58c5fc13
MT
65266
65267 #else /* SCTP_DEBUG */
65268
65269-#define SCTP_DEBUG_PRINTK(whatever...)
bc901d79 65270-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
58c5fc13
MT
65271-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65272+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
bc901d79 65273+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
58c5fc13
MT
65274+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65275 #define SCTP_ENABLE_DEBUG
65276 #define SCTP_DISABLE_DEBUG
65277 #define SCTP_ASSERT(expr, str, func)
fe2de317 65278diff --git a/include/net/sock.h b/include/net/sock.h
5e856224 65279index 91c1c8b..15ae923 100644
fe2de317
MT
65280--- a/include/net/sock.h
65281+++ b/include/net/sock.h
5e856224 65282@@ -299,7 +299,7 @@ struct sock {
8308f9c9
MT
65283 #ifdef CONFIG_RPS
65284 __u32 sk_rxhash;
65285 #endif
65286- atomic_t sk_drops;
65287+ atomic_unchecked_t sk_drops;
65288 int sk_rcvbuf;
65289
65290 struct sk_filter __rcu *sk_filter;
5e856224 65291@@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
15a11c5b
MT
65292 }
65293
65294 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
65295- char __user *from, char *to,
65296+ char __user *from, unsigned char *to,
65297 int copy, int offset)
65298 {
65299 if (skb->ip_summed == CHECKSUM_NONE) {
fe2de317 65300diff --git a/include/net/tcp.h b/include/net/tcp.h
5e856224 65301index 2d80c29..aa07caf 100644
fe2de317
MT
65302--- a/include/net/tcp.h
65303+++ b/include/net/tcp.h
5e856224 65304@@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
4c928ab7
MT
65305 char *name;
65306 sa_family_t family;
65307 const struct file_operations *seq_fops;
65308- struct seq_operations seq_ops;
65309+ seq_operations_no_const seq_ops;
ae4e228f 65310 };
16454cff 65311
15a11c5b 65312 struct tcp_iter_state {
fe2de317 65313diff --git a/include/net/udp.h b/include/net/udp.h
5e856224 65314index e39592f..fef9680 100644
fe2de317
MT
65315--- a/include/net/udp.h
65316+++ b/include/net/udp.h
5e856224 65317@@ -243,7 +243,7 @@ struct udp_seq_afinfo {
4c928ab7
MT
65318 sa_family_t family;
65319 struct udp_table *udp_table;
65320 const struct file_operations *seq_fops;
65321- struct seq_operations seq_ops;
65322+ seq_operations_no_const seq_ops;
ae4e228f 65323 };
16454cff 65324
15a11c5b 65325 struct udp_iter_state {
fe2de317 65326diff --git a/include/net/xfrm.h b/include/net/xfrm.h
5e856224 65327index 89174e2..1f82598 100644
fe2de317
MT
65328--- a/include/net/xfrm.h
65329+++ b/include/net/xfrm.h
66a7e928 65330@@ -505,7 +505,7 @@ struct xfrm_policy {
8308f9c9
MT
65331 struct timer_list timer;
65332
65333 struct flow_cache_object flo;
65334- atomic_t genid;
65335+ atomic_unchecked_t genid;
65336 u32 priority;
65337 u32 index;
65338 struct xfrm_mark mark;
fe2de317 65339diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
4c928ab7 65340index 1a046b1..ee0bef0 100644
fe2de317
MT
65341--- a/include/rdma/iw_cm.h
65342+++ b/include/rdma/iw_cm.h
4c928ab7 65343@@ -122,7 +122,7 @@ struct iw_cm_verbs {
15a11c5b
MT
65344 int backlog);
65345
65346 int (*destroy_listen)(struct iw_cm_id *cm_id);
65347-};
65348+} __no_const;
65349
65350 /**
65351 * iw_create_cm_id - Create an IW CM identifier.
fe2de317 65352diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
5e856224 65353index 6a3922f..0b73022 100644
fe2de317
MT
65354--- a/include/scsi/libfc.h
65355+++ b/include/scsi/libfc.h
4c928ab7 65356@@ -748,6 +748,7 @@ struct libfc_function_template {
15a11c5b
MT
65357 */
65358 void (*disc_stop_final) (struct fc_lport *);
65359 };
65360+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65361
65362 /**
65363 * struct fc_disc - Discovery context
4c928ab7 65364@@ -851,7 +852,7 @@ struct fc_lport {
15a11c5b
MT
65365 struct fc_vport *vport;
65366
65367 /* Operational Information */
65368- struct libfc_function_template tt;
65369+ libfc_function_template_no_const tt;
65370 u8 link_up;
65371 u8 qfull;
65372 enum fc_lport_state state;
fe2de317 65373diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
5e856224 65374index 77273f2..dd4031f 100644
fe2de317
MT
65375--- a/include/scsi/scsi_device.h
65376+++ b/include/scsi/scsi_device.h
8308f9c9
MT
65377@@ -161,9 +161,9 @@ struct scsi_device {
65378 unsigned int max_device_blocked; /* what device_blocked counts down from */
65379 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65380
65381- atomic_t iorequest_cnt;
65382- atomic_t iodone_cnt;
65383- atomic_t ioerr_cnt;
65384+ atomic_unchecked_t iorequest_cnt;
65385+ atomic_unchecked_t iodone_cnt;
65386+ atomic_unchecked_t ioerr_cnt;
65387
65388 struct device sdev_gendev,
65389 sdev_dev;
fe2de317
MT
65390diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
65391index 2a65167..91e01f8 100644
65392--- a/include/scsi/scsi_transport_fc.h
65393+++ b/include/scsi/scsi_transport_fc.h
15a11c5b
MT
65394@@ -711,7 +711,7 @@ struct fc_function_template {
65395 unsigned long show_host_system_hostname:1;
66a7e928 65396
15a11c5b
MT
65397 unsigned long disable_target_scan:1;
65398-};
65399+} __do_const;
66a7e928 65400
66a7e928 65401
15a11c5b 65402 /**
fe2de317
MT
65403diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65404index 030b87c..98a6954 100644
65405--- a/include/sound/ak4xxx-adda.h
65406+++ b/include/sound/ak4xxx-adda.h
15a11c5b
MT
65407@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65408 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65409 unsigned char val);
65410 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65411-};
65412+} __no_const;
65413
65414 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65415
fe2de317
MT
65416diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65417index 8c05e47..2b5df97 100644
65418--- a/include/sound/hwdep.h
65419+++ b/include/sound/hwdep.h
15a11c5b
MT
65420@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65421 struct snd_hwdep_dsp_status *status);
65422 int (*dsp_load)(struct snd_hwdep *hw,
65423 struct snd_hwdep_dsp_image *image);
65424-};
65425+} __no_const;
65426
65427 struct snd_hwdep {
65428 struct snd_card *card;
fe2de317 65429diff --git a/include/sound/info.h b/include/sound/info.h
5e856224 65430index 9ca1a49..aba1728 100644
fe2de317
MT
65431--- a/include/sound/info.h
65432+++ b/include/sound/info.h
15a11c5b
MT
65433@@ -44,7 +44,7 @@ struct snd_info_entry_text {
65434 struct snd_info_buffer *buffer);
65435 void (*write)(struct snd_info_entry *entry,
65436 struct snd_info_buffer *buffer);
65437-};
65438+} __no_const;
65439
65440 struct snd_info_entry_ops {
65441 int (*open)(struct snd_info_entry *entry,
fe2de317 65442diff --git a/include/sound/pcm.h b/include/sound/pcm.h
4c928ab7 65443index 0cf91b2..b70cae4 100644
fe2de317
MT
65444--- a/include/sound/pcm.h
65445+++ b/include/sound/pcm.h
15a11c5b
MT
65446@@ -81,6 +81,7 @@ struct snd_pcm_ops {
65447 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65448 int (*ack)(struct snd_pcm_substream *substream);
65449 };
65450+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
66a7e928 65451
15a11c5b
MT
65452 /*
65453 *
fe2de317
MT
65454diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65455index af1b49e..a5d55a5 100644
65456--- a/include/sound/sb16_csp.h
65457+++ b/include/sound/sb16_csp.h
15a11c5b
MT
65458@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65459 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65460 int (*csp_stop) (struct snd_sb_csp * p);
65461 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65462-};
65463+} __no_const;
66a7e928 65464
15a11c5b
MT
65465 /*
65466 * CSP private data
fe2de317 65467diff --git a/include/sound/soc.h b/include/sound/soc.h
5e856224 65468index 0992dff..bb366fe 100644
fe2de317
MT
65469--- a/include/sound/soc.h
65470+++ b/include/sound/soc.h
5e856224 65471@@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
6e9df6a3
MT
65472 /* platform IO - used for platform DAPM */
65473 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65474 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
15a11c5b
MT
65475-};
65476+} __do_const;
65477
65478 struct snd_soc_platform {
65479 const char *name;
5e856224
MT
65480@@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
65481 struct snd_soc_dai_link *dai_link;
65482 struct mutex pcm_mutex;
65483 enum snd_soc_pcm_subclass pcm_subclass;
65484- struct snd_pcm_ops ops;
65485+ snd_pcm_ops_no_const ops;
65486
65487 unsigned int complete:1;
65488 unsigned int dev_registered:1;
fe2de317
MT
65489diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65490index 444cd6b..3327cc5 100644
65491--- a/include/sound/ymfpci.h
65492+++ b/include/sound/ymfpci.h
8308f9c9
MT
65493@@ -358,7 +358,7 @@ struct snd_ymfpci {
65494 spinlock_t reg_lock;
65495 spinlock_t voice_lock;
65496 wait_queue_head_t interrupt_sleep;
65497- atomic_t interrupt_sleep_count;
65498+ atomic_unchecked_t interrupt_sleep_count;
65499 struct snd_info_entry *proc_entry;
65500 const struct firmware *dsp_microcode;
65501 const struct firmware *controller_microcode;
fe2de317 65502diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
5e856224 65503index fe73eb8..56388b1 100644
fe2de317
MT
65504--- a/include/target/target_core_base.h
65505+++ b/include/target/target_core_base.h
5e856224 65506@@ -443,7 +443,7 @@ struct t10_reservation_ops {
15a11c5b
MT
65507 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65508 int (*t10_pr_register)(struct se_cmd *);
65509 int (*t10_pr_clear)(struct se_cmd *);
65510-};
65511+} __no_const;
65512
6e9df6a3 65513 struct t10_reservation {
15a11c5b 65514 /* Reservation effects all target ports */
5e856224 65515@@ -561,8 +561,8 @@ struct se_cmd {
4c928ab7 65516 atomic_t t_se_count;
8308f9c9
MT
65517 atomic_t t_task_cdbs_left;
65518 atomic_t t_task_cdbs_ex_left;
8308f9c9
MT
65519- atomic_t t_task_cdbs_sent;
65520- atomic_t t_transport_aborted;
65521+ atomic_unchecked_t t_task_cdbs_sent;
65522+ atomic_unchecked_t t_transport_aborted;
65523 atomic_t t_transport_active;
65524 atomic_t t_transport_complete;
65525 atomic_t t_transport_queue_active;
5e856224
MT
65526@@ -799,7 +799,7 @@ struct se_device {
65527 spinlock_t stats_lock;
4c928ab7 65528 /* Active commands on this virtual SE device */
8308f9c9 65529 atomic_t simple_cmds;
8308f9c9
MT
65530- atomic_t dev_ordered_id;
65531+ atomic_unchecked_t dev_ordered_id;
8308f9c9 65532 atomic_t execute_tasks;
4c928ab7
MT
65533 atomic_t dev_ordered_sync;
65534 atomic_t dev_qf_count;
fe2de317
MT
65535diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65536index 1c09820..7f5ec79 100644
65537--- a/include/trace/events/irq.h
65538+++ b/include/trace/events/irq.h
bc901d79 65539@@ -36,7 +36,7 @@ struct softirq_action;
ae4e228f
MT
65540 */
65541 TRACE_EVENT(irq_handler_entry,
65542
65543- TP_PROTO(int irq, struct irqaction *action),
65544+ TP_PROTO(int irq, const struct irqaction *action),
65545
65546 TP_ARGS(irq, action),
65547
bc901d79 65548@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
ae4e228f
MT
65549 */
65550 TRACE_EVENT(irq_handler_exit,
65551
65552- TP_PROTO(int irq, struct irqaction *action, int ret),
65553+ TP_PROTO(int irq, const struct irqaction *action, int ret),
65554
65555 TP_ARGS(irq, action, ret),
65556
fe2de317 65557diff --git a/include/video/udlfb.h b/include/video/udlfb.h
4c928ab7 65558index c41f308..6918de3 100644
fe2de317
MT
65559--- a/include/video/udlfb.h
65560+++ b/include/video/udlfb.h
4c928ab7 65561@@ -52,10 +52,10 @@ struct dlfb_data {
8308f9c9 65562 u32 pseudo_palette[256];
4c928ab7 65563 int blank_mode; /*one of FB_BLANK_ */
8308f9c9
MT
65564 /* blit-only rendering path metrics, exposed through sysfs */
65565- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65566- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65567- atomic_t bytes_sent; /* to usb, after compression including overhead */
65568- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65569+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65570+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65571+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65572+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65573 };
65574
65575 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
fe2de317
MT
65576diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65577index 0993a22..32ba2fe 100644
65578--- a/include/video/uvesafb.h
65579+++ b/include/video/uvesafb.h
58c5fc13
MT
65580@@ -177,6 +177,7 @@ struct uvesafb_par {
65581 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65582 u8 pmi_setpal; /* PMI for palette changes */
65583 u16 *pmi_base; /* protected mode interface location */
65584+ u8 *pmi_code; /* protected mode code location */
65585 void *pmi_start;
65586 void *pmi_pal;
65587 u8 *vbe_state_orig; /*
fe2de317 65588diff --git a/init/Kconfig b/init/Kconfig
5e856224 65589index 3f42cd6..613f41d 100644
fe2de317
MT
65590--- a/init/Kconfig
65591+++ b/init/Kconfig
5e856224
MT
65592@@ -799,6 +799,7 @@ endif # CGROUPS
65593
65594 config CHECKPOINT_RESTORE
65595 bool "Checkpoint/restore support" if EXPERT
65596+ depends on !GRKERNSEC
65597 default n
65598 help
65599 Enables additional kernel features in a sake of checkpoint/restore.
65600@@ -1249,7 +1250,7 @@ config SLUB_DEBUG
fe2de317
MT
65601
65602 config COMPAT_BRK
65603 bool "Disable heap randomization"
65604- default y
65605+ default n
65606 help
65607 Randomizing heap placement makes heap exploits harder, but it
65608 also breaks ancient binaries (including anything libc5 based).
65609diff --git a/init/do_mounts.c b/init/do_mounts.c
5e856224 65610index 2974c8b..0b863ae 100644
fe2de317
MT
65611--- a/init/do_mounts.c
65612+++ b/init/do_mounts.c
5e856224 65613@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
58c5fc13
MT
65614 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65615 {
5e856224 65616 struct super_block *s;
58c5fc13 65617- int err = sys_mount(name, "/root", fs, flags, data);
6e9df6a3 65618+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
58c5fc13
MT
65619 if (err)
65620 return err;
65621
6e9df6a3 65622- sys_chdir((const char __user __force *)"/root");
5e856224
MT
65623+ sys_chdir((const char __force_user *)"/root");
65624 s = current->fs->pwd.dentry->d_sb;
65625 ROOT_DEV = s->s_dev;
6e9df6a3 65626 printk(KERN_INFO
5e856224 65627@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
58c5fc13
MT
65628 va_start(args, fmt);
65629 vsprintf(buf, fmt, args);
65630 va_end(args);
65631- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65632+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65633 if (fd >= 0) {
65634 sys_ioctl(fd, FDEJECT, 0);
65635 sys_close(fd);
65636 }
65637 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65638- fd = sys_open("/dev/console", O_RDWR, 0);
df50ba0c 65639+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58c5fc13
MT
65640 if (fd >= 0) {
65641 sys_ioctl(fd, TCGETS, (long)&termios);
65642 termios.c_lflag &= ~ICANON;
65643 sys_ioctl(fd, TCSETSF, (long)&termios);
65644- sys_read(fd, &c, 1);
65645+ sys_read(fd, (char __user *)&c, 1);
65646 termios.c_lflag |= ICANON;
65647 sys_ioctl(fd, TCSETSF, (long)&termios);
65648 sys_close(fd);
5e856224 65649@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
58c5fc13
MT
65650 mount_root();
65651 out:
ae4e228f 65652 devtmpfs_mount("dev");
58c5fc13 65653- sys_mount(".", "/", NULL, MS_MOVE, NULL);
6e9df6a3
MT
65654- sys_chroot((const char __user __force *)".");
65655+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65656+ sys_chroot((const char __force_user *)".");
58c5fc13 65657 }
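The __force_user casts follow one pattern throughout init/: kernel-resident constant strings are handed to syscall entry points declared to take __user pointers, and the cast (an annotation presumably introduced elsewhere in this patch) overrides the user/kernel address-space distinction only at these audited call sites. A minimal sketch of the same pattern in a hypothetical early-boot helper:

static long __init remove_stale_marker(void)             /* hypothetical */
{
	return sys_unlink((const char __force_user *)"/.bootmarker");
}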
fe2de317
MT
65658diff --git a/init/do_mounts.h b/init/do_mounts.h
65659index f5b978a..69dbfe8 100644
65660--- a/init/do_mounts.h
65661+++ b/init/do_mounts.h
58c5fc13
MT
65662@@ -15,15 +15,15 @@ extern int root_mountflags;
65663
65664 static inline int create_dev(char *name, dev_t dev)
65665 {
65666- sys_unlink(name);
65667- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
6e9df6a3
MT
65668+ sys_unlink((char __force_user *)name);
65669+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
58c5fc13
MT
65670 }
65671
65672 #if BITS_PER_LONG == 32
65673 static inline u32 bstat(char *name)
65674 {
65675 struct stat64 stat;
65676- if (sys_stat64(name, &stat) != 0)
6e9df6a3
MT
65677+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65678 return 0;
65679 if (!S_ISBLK(stat.st_mode))
65680 return 0;
65681@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65682 static inline u32 bstat(char *name)
65683 {
65684 struct stat stat;
65685- if (sys_newstat(name, &stat) != 0)
65686+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
58c5fc13
MT
65687 return 0;
65688 if (!S_ISBLK(stat.st_mode))
65689 return 0;
fe2de317
MT
65690diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65691index 3098a38..253064e 100644
65692--- a/init/do_mounts_initrd.c
65693+++ b/init/do_mounts_initrd.c
6892158b 65694@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58c5fc13
MT
65695 create_dev("/dev/root.old", Root_RAM0);
65696 /* mount initrd on rootfs' /root */
65697 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65698- sys_mkdir("/old", 0700);
65699- root_fd = sys_open("/", 0, 0);
65700- old_fd = sys_open("/old", 0, 0);
6e9df6a3
MT
65701+ sys_mkdir((const char __force_user *)"/old", 0700);
65702+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
65703+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
58c5fc13
MT
65704 /* move initrd over / and chdir/chroot in initrd root */
65705- sys_chdir("/root");
65706- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65707- sys_chroot(".");
6e9df6a3
MT
65708+ sys_chdir((const char __force_user *)"/root");
65709+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65710+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
65711
65712 /*
65713 * In case that a resume from disk is carried out by linuxrc or one of
6892158b 65714@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58c5fc13
MT
65715
65716 /* move initrd to rootfs' /old */
65717 sys_fchdir(old_fd);
65718- sys_mount("/", ".", NULL, MS_MOVE, NULL);
6e9df6a3 65719+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
58c5fc13
MT
65720 /* switch root and cwd back to / of rootfs */
65721 sys_fchdir(root_fd);
65722- sys_chroot(".");
6e9df6a3 65723+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
65724 sys_close(old_fd);
65725 sys_close(root_fd);
65726
65727 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65728- sys_chdir("/old");
6e9df6a3 65729+ sys_chdir((const char __force_user *)"/old");
58c5fc13
MT
65730 return;
65731 }
65732
6892158b 65733@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58c5fc13
MT
65734 mount_root();
65735
65736 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65737- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
6e9df6a3 65738+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
58c5fc13
MT
65739 if (!error)
65740 printk("okay\n");
65741 else {
65742- int fd = sys_open("/dev/root.old", O_RDWR, 0);
6e9df6a3 65743+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
58c5fc13
MT
65744 if (error == -ENOENT)
65745 printk("/initrd does not exist. Ignored.\n");
65746 else
65747 printk("failed\n");
65748 printk(KERN_NOTICE "Unmounting old root\n");
65749- sys_umount("/old", MNT_DETACH);
6e9df6a3 65750+ sys_umount((char __force_user *)"/old", MNT_DETACH);
58c5fc13
MT
65751 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65752 if (fd < 0) {
65753 error = fd;
6892158b 65754@@ -116,11 +116,11 @@ int __init initrd_load(void)
58c5fc13
MT
65755 * mounted in the normal path.
65756 */
65757 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65758- sys_unlink("/initrd.image");
6e9df6a3 65759+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
65760 handle_initrd();
65761 return 1;
65762 }
65763 }
65764- sys_unlink("/initrd.image");
6e9df6a3 65765+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
65766 return 0;
65767 }
fe2de317
MT
65768diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65769index 32c4799..c27ee74 100644
65770--- a/init/do_mounts_md.c
65771+++ b/init/do_mounts_md.c
58c5fc13
MT
65772@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
65773 partitioned ? "_d" : "", minor,
65774 md_setup_args[ent].device_names);
65775
65776- fd = sys_open(name, 0, 0);
6e9df6a3 65777+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
65778 if (fd < 0) {
65779 printk(KERN_ERR "md: open failed - cannot start "
65780 "array %s\n", name);
65781@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
65782 * array without it
65783 */
65784 sys_close(fd);
65785- fd = sys_open(name, 0, 0);
6e9df6a3 65786+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
65787 sys_ioctl(fd, BLKRRPART, 0);
65788 }
65789 sys_close(fd);
6e9df6a3
MT
65790@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
65791
65792 wait_for_device_probe();
65793
65794- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
65795+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65796 if (fd >= 0) {
65797 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65798 sys_close(fd);
fe2de317 65799diff --git a/init/initramfs.c b/init/initramfs.c
5e856224 65800index 8216c30..25e8e32 100644
fe2de317
MT
65801--- a/init/initramfs.c
65802+++ b/init/initramfs.c
ae4e228f
MT
65803@@ -74,7 +74,7 @@ static void __init free_hash(void)
65804 }
65805 }
65806
65807-static long __init do_utime(char __user *filename, time_t mtime)
65808+static long __init do_utime(__force char __user *filename, time_t mtime)
65809 {
65810 struct timespec t[2];
65811
65812@@ -109,7 +109,7 @@ static void __init dir_utime(void)
65813 struct dir_entry *de, *tmp;
65814 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65815 list_del(&de->list);
65816- do_utime(de->name, de->mtime);
6e9df6a3 65817+ do_utime((char __force_user *)de->name, de->mtime);
ae4e228f
MT
65818 kfree(de->name);
65819 kfree(de);
65820 }
58c5fc13
MT
65821@@ -271,7 +271,7 @@ static int __init maybe_link(void)
65822 if (nlink >= 2) {
65823 char *old = find_link(major, minor, ino, mode, collected);
65824 if (old)
65825- return (sys_link(old, collected) < 0) ? -1 : 1;
6e9df6a3 65826+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
58c5fc13
MT
65827 }
65828 return 0;
65829 }
5e856224 65830@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
58c5fc13
MT
65831 {
65832 struct stat st;
65833
65834- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
6e9df6a3 65835+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
58c5fc13
MT
65836 if (S_ISDIR(st.st_mode))
65837- sys_rmdir(path);
6e9df6a3 65838+ sys_rmdir((char __force_user *)path);
58c5fc13
MT
65839 else
65840- sys_unlink(path);
6e9df6a3 65841+ sys_unlink((char __force_user *)path);
58c5fc13
MT
65842 }
65843 }
65844
65845@@ -305,7 +305,7 @@ static int __init do_name(void)
65846 int openflags = O_WRONLY|O_CREAT;
65847 if (ml != 1)
65848 openflags |= O_TRUNC;
65849- wfd = sys_open(collected, openflags, mode);
6e9df6a3 65850+ wfd = sys_open((char __force_user *)collected, openflags, mode);
58c5fc13
MT
65851
65852 if (wfd >= 0) {
65853 sys_fchown(wfd, uid, gid);
ae4e228f 65854@@ -317,17 +317,17 @@ static int __init do_name(void)
58c5fc13
MT
65855 }
65856 }
65857 } else if (S_ISDIR(mode)) {
65858- sys_mkdir(collected, mode);
65859- sys_chown(collected, uid, gid);
65860- sys_chmod(collected, mode);
6e9df6a3
MT
65861+ sys_mkdir((char __force_user *)collected, mode);
65862+ sys_chown((char __force_user *)collected, uid, gid);
65863+ sys_chmod((char __force_user *)collected, mode);
58c5fc13
MT
65864 dir_add(collected, mtime);
65865 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65866 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65867 if (maybe_link() == 0) {
65868- sys_mknod(collected, mode, rdev);
65869- sys_chown(collected, uid, gid);
65870- sys_chmod(collected, mode);
ae4e228f 65871- do_utime(collected, mtime);
6e9df6a3
MT
65872+ sys_mknod((char __force_user *)collected, mode, rdev);
65873+ sys_chown((char __force_user *)collected, uid, gid);
65874+ sys_chmod((char __force_user *)collected, mode);
65875+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
65876 }
65877 }
ae4e228f
MT
65878 return 0;
65879@@ -336,15 +336,15 @@ static int __init do_name(void)
58c5fc13
MT
65880 static int __init do_copy(void)
65881 {
65882 if (count >= body_len) {
65883- sys_write(wfd, victim, body_len);
6e9df6a3 65884+ sys_write(wfd, (char __force_user *)victim, body_len);
58c5fc13 65885 sys_close(wfd);
ae4e228f 65886- do_utime(vcollected, mtime);
6e9df6a3 65887+ do_utime((char __force_user *)vcollected, mtime);
58c5fc13 65888 kfree(vcollected);
ae4e228f 65889 eat(body_len);
58c5fc13
MT
65890 state = SkipIt;
65891 return 0;
65892 } else {
65893- sys_write(wfd, victim, count);
6e9df6a3 65894+ sys_write(wfd, (char __force_user *)victim, count);
58c5fc13
MT
65895 body_len -= count;
65896 eat(count);
65897 return 1;
ae4e228f 65898@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58c5fc13
MT
65899 {
65900 collected[N_ALIGN(name_len) + body_len] = '\0';
65901 clean_path(collected, 0);
65902- sys_symlink(collected + N_ALIGN(name_len), collected);
65903- sys_lchown(collected, uid, gid);
ae4e228f 65904- do_utime(collected, mtime);
6e9df6a3
MT
65905+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65906+ sys_lchown((char __force_user *)collected, uid, gid);
65907+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
65908 state = SkipIt;
65909 next_state = Reset;
ae4e228f 65910 return 0;
fe2de317 65911diff --git a/init/main.c b/init/main.c
5e856224 65912index ff49a6d..5fa0429 100644
fe2de317
MT
65913--- a/init/main.c
65914+++ b/init/main.c
65915@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
58c5fc13
MT
65916 extern void tc_init(void);
65917 #endif
58c5fc13 65918
16454cff
MT
65919+extern void grsecurity_init(void);
65920+
65921 /*
65922 * Debug helper: via this flag we know that we are in 'early bootup code'
65923 * where only the boot processor is running with IRQ disabled. This means
fe2de317 65924@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
58c5fc13
MT
65925
65926 __setup("reset_devices", set_reset_devices);
65927
df50ba0c 65928+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
bc901d79
MT
65929+extern char pax_enter_kernel_user[];
65930+extern char pax_exit_kernel_user[];
df50ba0c
MT
65931+extern pgdval_t clone_pgd_mask;
65932+#endif
65933+
65934+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58c5fc13
MT
65935+static int __init setup_pax_nouderef(char *str)
65936+{
df50ba0c 65937+#ifdef CONFIG_X86_32
58c5fc13 65938+ unsigned int cpu;
66a7e928 65939+ struct desc_struct *gdt;
58c5fc13 65940+
4c928ab7 65941+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
66a7e928
MT
65942+ gdt = get_cpu_gdt_table(cpu);
65943+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65944+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65945+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65946+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58c5fc13 65947+ }
bc901d79 65948+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
df50ba0c 65949+#else
6892158b
MT
65950+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65951+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
df50ba0c
MT
65952+ clone_pgd_mask = ~(pgdval_t)0UL;
65953+#endif
58c5fc13
MT
65954+
65955+ return 0;
65956+}
65957+early_param("pax_nouderef", setup_pax_nouderef);
65958+#endif
65959+
65960+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 65961+int pax_softmode;
58c5fc13
MT
65962+
65963+static int __init setup_pax_softmode(char *str)
65964+{
65965+ get_option(&str, &pax_softmode);
65966+ return 1;
65967+}
65968+__setup("pax_softmode=", setup_pax_softmode);
65969+#endif
65970+
6892158b
MT
65971 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65972 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58c5fc13 65973 static const char *panic_later, *panic_param;
5e856224 65974@@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
58c5fc13
MT
65975 {
65976 int count = preempt_count();
6892158b 65977 int ret;
58c5fc13
MT
65978+ const char *msg1 = "", *msg2 = "";
65979
6892158b
MT
65980 if (initcall_debug)
65981 ret = do_one_initcall_debug(fn);
5e856224 65982@@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
6892158b 65983 sprintf(msgbuf, "error code %d ", ret);
58c5fc13
MT
65984
65985 if (preempt_count() != count) {
65986- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65987+ msg1 = " preemption imbalance";
65988 preempt_count() = count;
65989 }
65990 if (irqs_disabled()) {
65991- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65992+ msg2 = " disabled interrupts";
65993 local_irq_enable();
65994 }
65995- if (msgbuf[0]) {
65996- printk("initcall %pF returned with %s\n", fn, msgbuf);
65997+ if (msgbuf[0] || *msg1 || *msg2) {
65998+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65999 }
66000
6892158b 66001 return ret;
5e856224 66002@@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
df50ba0c
MT
66003 do_basic_setup();
66004
66005 /* Open the /dev/console on the rootfs, this should never fail */
66006- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
6e9df6a3 66007+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
df50ba0c
MT
66008 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
66009
66010 (void) sys_dup(0);
5e856224 66011@@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
ae4e228f
MT
66012 if (!ramdisk_execute_command)
66013 ramdisk_execute_command = "/init";
66014
66015- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
6e9df6a3 66016+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
ae4e228f 66017 ramdisk_execute_command = NULL;
58c5fc13
MT
66018 prepare_namespace();
66019 }
66020
66021+ grsecurity_init();
66022+
66023 /*
66024 * Ok, we have completed the initial bootup, and
66025 * we're essentially up and running. Get rid of the
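The early_param()/__setup() handlers added above expose both knobs on the kernel command line; an illustrative boot entry (image path and root device are hypothetical) would be:

    linux /boot/vmlinuz-3.3.6 root=/dev/sda1 pax_softmode=1 pax_nouderef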
fe2de317 66026diff --git a/ipc/mqueue.c b/ipc/mqueue.c
5e856224 66027index 86ee272..773d937 100644
fe2de317
MT
66028--- a/ipc/mqueue.c
66029+++ b/ipc/mqueue.c
66030@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
6e9df6a3
MT
66031 mq_bytes = (mq_msg_tblsz +
66032 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
66033
66034+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66035 spin_lock(&mq_lock);
66036 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
5e856224 66037 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
fe2de317 66038diff --git a/ipc/msg.c b/ipc/msg.c
4c928ab7 66039index 7385de2..a8180e08 100644
fe2de317
MT
66040--- a/ipc/msg.c
66041+++ b/ipc/msg.c
66042@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
15a11c5b
MT
66043 return security_msg_queue_associate(msq, msgflg);
66044 }
66045
66046+static struct ipc_ops msg_ops = {
66047+ .getnew = newque,
66048+ .associate = msg_security,
66049+ .more_checks = NULL
66050+};
66051+
66052 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66053 {
66054 struct ipc_namespace *ns;
66055- struct ipc_ops msg_ops;
66056 struct ipc_params msg_params;
66057
66058 ns = current->nsproxy->ipc_ns;
66059
66060- msg_ops.getnew = newque;
66061- msg_ops.associate = msg_security;
66062- msg_ops.more_checks = NULL;
66063-
66064 msg_params.key = key;
66065 msg_params.flg = msgflg;
66066
fe2de317 66067diff --git a/ipc/sem.c b/ipc/sem.c
4c928ab7 66068index 5215a81..cfc0cac 100644
fe2de317
MT
66069--- a/ipc/sem.c
66070+++ b/ipc/sem.c
4c928ab7 66071@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
66072 return 0;
66073 }
66074
66075+static struct ipc_ops sem_ops = {
66076+ .getnew = newary,
66077+ .associate = sem_security,
66078+ .more_checks = sem_more_checks
66079+};
66080+
66081 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66082 {
66083 struct ipc_namespace *ns;
66084- struct ipc_ops sem_ops;
66085 struct ipc_params sem_params;
66086
66087 ns = current->nsproxy->ipc_ns;
4c928ab7 66088@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
15a11c5b
MT
66089 if (nsems < 0 || nsems > ns->sc_semmsl)
66090 return -EINVAL;
66091
66092- sem_ops.getnew = newary;
66093- sem_ops.associate = sem_security;
66094- sem_ops.more_checks = sem_more_checks;
66095-
66096 sem_params.key = key;
66097 sem_params.flg = semflg;
66098 sem_params.u.nsems = nsems;
fe2de317 66099diff --git a/ipc/shm.c b/ipc/shm.c
4c928ab7 66100index b76be5b..859e750 100644
fe2de317
MT
66101--- a/ipc/shm.c
66102+++ b/ipc/shm.c
66103@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
58c5fc13
MT
66104 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66105 #endif
66106
66107+#ifdef CONFIG_GRKERNSEC
66108+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66109+ const time_t shm_createtime, const uid_t cuid,
66110+ const int shmid);
66111+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66112+ const time_t shm_createtime);
66113+#endif
66114+
66115 void shm_init_ns(struct ipc_namespace *ns)
66116 {
66117 ns->shm_ctlmax = SHMMAX;
fe2de317 66118@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
58c5fc13
MT
66119 shp->shm_lprid = 0;
66120 shp->shm_atim = shp->shm_dtim = 0;
66121 shp->shm_ctim = get_seconds();
66122+#ifdef CONFIG_GRKERNSEC
66123+ {
66124+ struct timespec timeval;
66125+ do_posix_clock_monotonic_gettime(&timeval);
66126+
66127+ shp->shm_createtime = timeval.tv_sec;
66128+ }
66129+#endif
66130 shp->shm_segsz = size;
66131 shp->shm_nattch = 0;
66132 shp->shm_file = file;
fe2de317 66133@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
66134 return 0;
66135 }
66136
66137+static struct ipc_ops shm_ops = {
66138+ .getnew = newseg,
66139+ .associate = shm_security,
66140+ .more_checks = shm_more_checks
66141+};
66142+
66143 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66144 {
66145 struct ipc_namespace *ns;
66146- struct ipc_ops shm_ops;
66147 struct ipc_params shm_params;
66148
66149 ns = current->nsproxy->ipc_ns;
66150
66151- shm_ops.getnew = newseg;
66152- shm_ops.associate = shm_security;
66153- shm_ops.more_checks = shm_more_checks;
66154-
66155 shm_params.key = key;
66156 shm_params.flg = shmflg;
66157 shm_params.u.size = size;
4c928ab7
MT
66158@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66159 f_mode = FMODE_READ | FMODE_WRITE;
66160 }
66161 if (shmflg & SHM_EXEC) {
66162+
66163+#ifdef CONFIG_PAX_MPROTECT
66164+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
66165+ goto out;
66166+#endif
66167+
66168 prot |= PROT_EXEC;
66169 acc_mode |= S_IXUGO;
66170 }
66171@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
58c5fc13
MT
66172 if (err)
66173 goto out_unlock;
66174
66175+#ifdef CONFIG_GRKERNSEC
66176+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66177+ shp->shm_perm.cuid, shmid) ||
66178+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66179+ err = -EACCES;
66180+ goto out_unlock;
66181+ }
66182+#endif
66183+
ae4e228f
MT
66184 path = shp->shm_file->f_path;
66185 path_get(&path);
58c5fc13
MT
66186 shp->shm_nattch++;
66187+#ifdef CONFIG_GRKERNSEC
66188+ shp->shm_lapid = current->pid;
66189+#endif
66190 size = i_size_read(path.dentry->d_inode);
66191 shm_unlock(shp);
66192
fe2de317 66193diff --git a/kernel/acct.c b/kernel/acct.c
5e856224 66194index 02e6167..54824f7 100644
fe2de317
MT
66195--- a/kernel/acct.c
66196+++ b/kernel/acct.c
5e856224 66197@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
58c5fc13
MT
66198 */
66199 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66200 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66201- file->f_op->write(file, (char *)&ac,
6e9df6a3 66202+ file->f_op->write(file, (char __force_user *)&ac,
58c5fc13
MT
66203 sizeof(acct_t), &file->f_pos);
66204 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66205 set_fs(fs);
fe2de317 66206diff --git a/kernel/audit.c b/kernel/audit.c
5e856224 66207index bb0eb5b..cf2a03a 100644
fe2de317
MT
66208--- a/kernel/audit.c
66209+++ b/kernel/audit.c
6e9df6a3 66210@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
8308f9c9
MT
66211 3) suppressed due to audit_rate_limit
66212 4) suppressed due to audit_backlog_limit
66213 */
66214-static atomic_t audit_lost = ATOMIC_INIT(0);
66215+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66216
66217 /* The netlink socket. */
66218 static struct sock *audit_sock;
6e9df6a3 66219@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
66220 unsigned long now;
66221 int print;
66222
66223- atomic_inc(&audit_lost);
66224+ atomic_inc_unchecked(&audit_lost);
66225
66226 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66227
6e9df6a3 66228@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
66229 printk(KERN_WARNING
66230 "audit: audit_lost=%d audit_rate_limit=%d "
66231 "audit_backlog_limit=%d\n",
66232- atomic_read(&audit_lost),
66233+ atomic_read_unchecked(&audit_lost),
66234 audit_rate_limit,
66235 audit_backlog_limit);
66236 audit_panic(message);
fe2de317 66237@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
8308f9c9
MT
66238 status_set.pid = audit_pid;
66239 status_set.rate_limit = audit_rate_limit;
66240 status_set.backlog_limit = audit_backlog_limit;
66241- status_set.lost = atomic_read(&audit_lost);
66242+ status_set.lost = atomic_read_unchecked(&audit_lost);
66243 status_set.backlog = skb_queue_len(&audit_skb_queue);
66244 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66245 &status_set, sizeof(status_set));
fe2de317 66246diff --git a/kernel/auditsc.c b/kernel/auditsc.c
5e856224 66247index af1de0f..06dfe57 100644
fe2de317
MT
66248--- a/kernel/auditsc.c
66249+++ b/kernel/auditsc.c
5e856224 66250@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
8308f9c9
MT
66251 }
66252
66253 /* global counter which is incremented every time something logs in */
66254-static atomic_t session_id = ATOMIC_INIT(0);
66255+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66256
66257 /**
5e856224
MT
66258 * audit_set_loginuid - set current task's audit_context loginuid
66259@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
66260 return -EPERM;
66261 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
8308f9c9 66262
5e856224
MT
66263- sessionid = atomic_inc_return(&session_id);
66264+ sessionid = atomic_inc_return_unchecked(&session_id);
8308f9c9 66265 if (context && context->in_syscall) {
5e856224
MT
66266 struct audit_buffer *ab;
66267
fe2de317 66268diff --git a/kernel/capability.c b/kernel/capability.c
5e856224 66269index 3f1adb6..c564db0 100644
fe2de317
MT
66270--- a/kernel/capability.c
66271+++ b/kernel/capability.c
66272@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
ae4e228f
MT
66273 * before modification is attempted and the application
66274 * fails.
66275 */
66276+ if (tocopy > ARRAY_SIZE(kdata))
66277+ return -EFAULT;
66278+
66279 if (copy_to_user(dataptr, kdata, tocopy
66280 * sizeof(struct __user_cap_data_struct))) {
66281 return -EFAULT;
5e856224
MT
66282@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
66283 int ret;
66284
66285 rcu_read_lock();
66286- ret = security_capable(__task_cred(t), ns, cap);
66287+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
66288+ gr_task_is_capable(t, __task_cred(t), cap);
66289 rcu_read_unlock();
66290
66291- return (ret == 0);
66292+ return ret;
66293 }
66294
66295 /**
66296@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
66297 int ret;
66298
66299 rcu_read_lock();
66300- ret = security_capable_noaudit(__task_cred(t), ns, cap);
66301+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
66302 rcu_read_unlock();
66303
66304- return (ret == 0);
66305+ return ret;
66306 }
66307
66308 /**
66309@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
58c5fc13
MT
66310 BUG();
66311 }
66312
5e856224
MT
66313- if (security_capable(current_cred(), ns, cap) == 0) {
66314+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
ae4e228f 66315 current->flags |= PF_SUPERPRIV;
66a7e928 66316 return true;
ae4e228f 66317 }
5e856224 66318@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
ae4e228f 66319 }
66a7e928
MT
66320 EXPORT_SYMBOL(ns_capable);
66321
66322+bool ns_capable_nolog(struct user_namespace *ns, int cap)
bc901d79
MT
66323+{
66324+ if (unlikely(!cap_valid(cap))) {
66325+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66326+ BUG();
66327+ }
66328+
5e856224 66329+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
bc901d79 66330+ current->flags |= PF_SUPERPRIV;
66a7e928 66331+ return true;
bc901d79 66332+ }
66a7e928 66333+ return false;
bc901d79 66334+}
66a7e928 66335+EXPORT_SYMBOL(ns_capable_nolog);
66a7e928
MT
66336+
66337 /**
5e856224
MT
66338 * capable - Determine if the current task has a superior capability in effect
66339 * @cap: The capability to be tested for
66340@@ -408,6 +427,12 @@ bool capable(int cap)
66a7e928 66341 }
5e856224 66342 EXPORT_SYMBOL(capable);
66a7e928 66343
5e856224 66344+bool capable_nolog(int cap)
66a7e928 66345+{
5e856224 66346+ return ns_capable_nolog(&init_user_ns, cap);
66a7e928 66347+}
5e856224 66348+EXPORT_SYMBOL(capable_nolog);
66a7e928
MT
66349+
66350 /**
66351 * nsown_capable - Check superior capability to one's own user_ns
66352 * @cap: The capability in question
fe2de317 66353diff --git a/kernel/compat.c b/kernel/compat.c
4c928ab7 66354index f346ced..aa2b1f4 100644
fe2de317
MT
66355--- a/kernel/compat.c
66356+++ b/kernel/compat.c
57199397
MT
66357@@ -13,6 +13,7 @@
66358
66359 #include <linux/linkage.h>
66360 #include <linux/compat.h>
66361+#include <linux/module.h>
66362 #include <linux/errno.h>
66363 #include <linux/time.h>
66364 #include <linux/signal.h>
4c928ab7 66365@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
66366 mm_segment_t oldfs;
66367 long ret;
66368
66369- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66370+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66371 oldfs = get_fs();
66372 set_fs(KERNEL_DS);
66373 ret = hrtimer_nanosleep_restart(restart);
4c928ab7 66374@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
6e9df6a3
MT
66375 oldfs = get_fs();
66376 set_fs(KERNEL_DS);
66377 ret = hrtimer_nanosleep(&tu,
66378- rmtp ? (struct timespec __user *)&rmt : NULL,
66379+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
66380 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66381 set_fs(oldfs);
66382
4c928ab7 66383@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
6e9df6a3
MT
66384 mm_segment_t old_fs = get_fs();
66385
66386 set_fs(KERNEL_DS);
66387- ret = sys_sigpending((old_sigset_t __user *) &s);
66388+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
66389 set_fs(old_fs);
66390 if (ret == 0)
66391 ret = put_user(s, set);
4c928ab7 66392@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
6e9df6a3
MT
66393 old_fs = get_fs();
66394 set_fs(KERNEL_DS);
66395 ret = sys_sigprocmask(how,
66396- set ? (old_sigset_t __user *) &s : NULL,
66397- oset ? (old_sigset_t __user *) &s : NULL);
66398+ set ? (old_sigset_t __force_user *) &s : NULL,
66399+ oset ? (old_sigset_t __force_user *) &s : NULL);
66400 set_fs(old_fs);
66401 if (ret == 0)
66402 if (oset)
4c928ab7 66403@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
6e9df6a3
MT
66404 mm_segment_t old_fs = get_fs();
66405
66406 set_fs(KERNEL_DS);
66407- ret = sys_old_getrlimit(resource, &r);
66408+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66409 set_fs(old_fs);
66410
66411 if (!ret) {
4c928ab7 66412@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
6e9df6a3
MT
66413 mm_segment_t old_fs = get_fs();
66414
66415 set_fs(KERNEL_DS);
66416- ret = sys_getrusage(who, (struct rusage __user *) &r);
66417+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66418 set_fs(old_fs);
66419
66420 if (ret)
4c928ab7 66421@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
6e9df6a3
MT
66422 set_fs (KERNEL_DS);
66423 ret = sys_wait4(pid,
66424 (stat_addr ?
66425- (unsigned int __user *) &status : NULL),
66426- options, (struct rusage __user *) &r);
66427+ (unsigned int __force_user *) &status : NULL),
66428+ options, (struct rusage __force_user *) &r);
66429 set_fs (old_fs);
66430
66431 if (ret > 0) {
4c928ab7 66432@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
6e9df6a3
MT
66433 memset(&info, 0, sizeof(info));
66434
66435 set_fs(KERNEL_DS);
66436- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66437- uru ? (struct rusage __user *)&ru : NULL);
66438+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66439+ uru ? (struct rusage __force_user *)&ru : NULL);
66440 set_fs(old_fs);
66441
66442 if ((ret < 0) || (info.si_signo == 0))
4c928ab7 66443@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
6e9df6a3
MT
66444 oldfs = get_fs();
66445 set_fs(KERNEL_DS);
66446 err = sys_timer_settime(timer_id, flags,
66447- (struct itimerspec __user *) &newts,
66448- (struct itimerspec __user *) &oldts);
66449+ (struct itimerspec __force_user *) &newts,
66450+ (struct itimerspec __force_user *) &oldts);
66451 set_fs(oldfs);
66452 if (!err && old && put_compat_itimerspec(old, &oldts))
66453 return -EFAULT;
4c928ab7 66454@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
6e9df6a3
MT
66455 oldfs = get_fs();
66456 set_fs(KERNEL_DS);
66457 err = sys_timer_gettime(timer_id,
66458- (struct itimerspec __user *) &ts);
66459+ (struct itimerspec __force_user *) &ts);
66460 set_fs(oldfs);
66461 if (!err && put_compat_itimerspec(setting, &ts))
66462 return -EFAULT;
4c928ab7 66463@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
6e9df6a3
MT
66464 oldfs = get_fs();
66465 set_fs(KERNEL_DS);
66466 err = sys_clock_settime(which_clock,
66467- (struct timespec __user *) &ts);
66468+ (struct timespec __force_user *) &ts);
66469 set_fs(oldfs);
66470 return err;
66471 }
4c928ab7 66472@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
6e9df6a3
MT
66473 oldfs = get_fs();
66474 set_fs(KERNEL_DS);
66475 err = sys_clock_gettime(which_clock,
66476- (struct timespec __user *) &ts);
66477+ (struct timespec __force_user *) &ts);
66478 set_fs(oldfs);
66479 if (!err && put_compat_timespec(&ts, tp))
66480 return -EFAULT;
4c928ab7 66481@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
6e9df6a3
MT
66482
66483 oldfs = get_fs();
66484 set_fs(KERNEL_DS);
66485- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66486+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66487 set_fs(oldfs);
66488
66489 err = compat_put_timex(utp, &txc);
4c928ab7 66490@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
6e9df6a3
MT
66491 oldfs = get_fs();
66492 set_fs(KERNEL_DS);
66493 err = sys_clock_getres(which_clock,
66494- (struct timespec __user *) &ts);
66495+ (struct timespec __force_user *) &ts);
66496 set_fs(oldfs);
66497 if (!err && tp && put_compat_timespec(&ts, tp))
66498 return -EFAULT;
4c928ab7 66499@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
66500 long err;
66501 mm_segment_t oldfs;
66502 struct timespec tu;
66503- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66504+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66505
66506- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66507+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66508 oldfs = get_fs();
66509 set_fs(KERNEL_DS);
66510 err = clock_nanosleep_restart(restart);
4c928ab7 66511@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
6e9df6a3
MT
66512 oldfs = get_fs();
66513 set_fs(KERNEL_DS);
66514 err = sys_clock_nanosleep(which_clock, flags,
66515- (struct timespec __user *) &in,
66516- (struct timespec __user *) &out);
66517+ (struct timespec __force_user *) &in,
66518+ (struct timespec __force_user *) &out);
66519 set_fs(oldfs);
66520
66521 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
fe2de317
MT
66522diff --git a/kernel/configs.c b/kernel/configs.c
66523index 42e8fa0..9e7406b 100644
66524--- a/kernel/configs.c
66525+++ b/kernel/configs.c
bc901d79 66526@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58c5fc13
MT
66527 struct proc_dir_entry *entry;
66528
66529 /* create the current config file */
66530+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66531+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66532+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66533+ &ikconfig_file_ops);
66534+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66535+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66536+ &ikconfig_file_ops);
66537+#endif
66538+#else
66539 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66540 &ikconfig_file_ops);
66541+#endif
66542+
66543 if (!entry)
66544 return -ENOMEM;
66545
fe2de317 66546diff --git a/kernel/cred.c b/kernel/cred.c
5e856224 66547index 48c6fd3..8398912 100644
fe2de317
MT
66548--- a/kernel/cred.c
66549+++ b/kernel/cred.c
4c928ab7
MT
66550@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
66551 validate_creds(cred);
66552 put_cred(cred);
66553 }
66a7e928 66554+
4c928ab7
MT
66555+#ifdef CONFIG_GRKERNSEC_SETXID
66556+ cred = (struct cred *) tsk->delayed_cred;
66557+ if (cred) {
66558+ tsk->delayed_cred = NULL;
66559+ validate_creds(cred);
66560+ put_cred(cred);
66561+ }
66562+#endif
66563 }
66a7e928 66564
4c928ab7 66565 /**
5e856224 66566@@ -472,7 +481,7 @@ error_put:
4c928ab7
MT
66567 * Always returns 0 thus allowing this function to be tail-called at the end
66568 * of, say, sys_setgid().
66569 */
66570-int commit_creds(struct cred *new)
66571+static int __commit_creds(struct cred *new)
66a7e928 66572 {
66a7e928
MT
66573 struct task_struct *task = current;
66574 const struct cred *old = task->real_cred;
5e856224 66575@@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
58c5fc13
MT
66576
66577 get_cred(new); /* we will require a ref for the subj creds too */
66578
66579+ gr_set_role_label(task, new->uid, new->gid);
66580+
66581 /* dumpability changes */
66582 if (old->euid != new->euid ||
66583 old->egid != new->egid ||
5e856224 66584@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
4c928ab7
MT
66585 put_cred(old);
66586 return 0;
66587 }
66588+#ifdef CONFIG_GRKERNSEC_SETXID
66589+extern int set_user(struct cred *new);
66a7e928 66590+
4c928ab7
MT
66591+void gr_delayed_cred_worker(void)
66592+{
66593+ const struct cred *new = current->delayed_cred;
66594+ struct cred *ncred;
66a7e928 66595+
4c928ab7 66596+ current->delayed_cred = NULL;
66a7e928 66597+
4c928ab7
MT
66598+ if (current_uid() && new != NULL) {
66599+ // from doing get_cred on it when queueing this
66600+ put_cred(new);
66601+ return;
66602+ } else if (new == NULL)
66603+ return;
66a7e928 66604+
4c928ab7
MT
66605+ ncred = prepare_creds();
66606+ if (!ncred)
66607+ goto die;
66608+ // uids
66609+ ncred->uid = new->uid;
66610+ ncred->euid = new->euid;
66611+ ncred->suid = new->suid;
66612+ ncred->fsuid = new->fsuid;
66613+ // gids
66614+ ncred->gid = new->gid;
66615+ ncred->egid = new->egid;
66616+ ncred->sgid = new->sgid;
66617+ ncred->fsgid = new->fsgid;
66618+ // groups
66619+ if (set_groups(ncred, new->group_info) < 0) {
66620+ abort_creds(ncred);
66621+ goto die;
66622+ }
66623+ // caps
66624+ ncred->securebits = new->securebits;
66625+ ncred->cap_inheritable = new->cap_inheritable;
66626+ ncred->cap_permitted = new->cap_permitted;
66627+ ncred->cap_effective = new->cap_effective;
66628+ ncred->cap_bset = new->cap_bset;
66629+
66630+ if (set_user(ncred)) {
66631+ abort_creds(ncred);
66632+ goto die;
66633+ }
66a7e928 66634+
4c928ab7
MT
66635+ // from doing get_cred on it when queueing this
66636+ put_cred(new);
66a7e928 66637+
4c928ab7
MT
66638+ __commit_creds(ncred);
66639+ return;
66640+die:
66641+ // from doing get_cred on it when queueing this
66642+ put_cred(new);
66643+ do_group_exit(SIGKILL);
66644+}
66645+#endif
66646+
66647+int commit_creds(struct cred *new)
66648+{
66649+#ifdef CONFIG_GRKERNSEC_SETXID
5e856224
MT
66650+ int ret;
66651+ int schedule_it = 0;
4c928ab7
MT
66652+ struct task_struct *t;
66653+
66654+ /* we won't get called with tasklist_lock held for writing
66655+ and interrupts disabled as the cred struct in that case is
66656+ init_cred
66657+ */
66658+ if (grsec_enable_setxid && !current_is_single_threaded() &&
66659+ !current_uid() && new->uid) {
5e856224
MT
66660+ schedule_it = 1;
66661+ }
66662+ ret = __commit_creds(new);
66663+ if (schedule_it) {
4c928ab7
MT
66664+ rcu_read_lock();
66665+ read_lock(&tasklist_lock);
66666+ for (t = next_thread(current); t != current;
66667+ t = next_thread(t)) {
66668+ if (t->delayed_cred == NULL) {
66669+ t->delayed_cred = get_cred(new);
5e856224 66670+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
4c928ab7
MT
66671+ set_tsk_need_resched(t);
66672+ }
66673+ }
66674+ read_unlock(&tasklist_lock);
66675+ rcu_read_unlock();
66676+ }
5e856224
MT
66677+ return ret;
66678+#else
4c928ab7 66679+ return __commit_creds(new);
5e856224 66680+#endif
4c928ab7
MT
66681+}
66682+
66683 EXPORT_SYMBOL(commit_creds);
66684
66685 /**
fe2de317 66686diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
5e856224 66687index 7fda904..59f620c 100644
fe2de317
MT
66688--- a/kernel/debug/debug_core.c
66689+++ b/kernel/debug/debug_core.c
66690@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
8308f9c9
MT
66691 */
66692 static atomic_t masters_in_kgdb;
66693 static atomic_t slaves_in_kgdb;
66694-static atomic_t kgdb_break_tasklet_var;
66695+static atomic_unchecked_t kgdb_break_tasklet_var;
66696 atomic_t kgdb_setting_breakpoint;
66697
66698 struct task_struct *kgdb_usethread;
66699@@ -129,7 +129,7 @@ int kgdb_single_step;
66700 static pid_t kgdb_sstep_pid;
66701
66702 /* to keep track of the CPU which is doing the single stepping*/
66703-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66704+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66705
66706 /*
66707 * If you are debugging a problem where roundup (the collection of
5e856224 66708@@ -537,7 +537,7 @@ return_normal:
8308f9c9
MT
66709 * kernel will only try for the value of sstep_tries before
66710 * giving up and continuing on.
66711 */
66712- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66713+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66714 (kgdb_info[cpu].task &&
66715 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66716 atomic_set(&kgdb_active, -1);
5e856224 66717@@ -631,8 +631,8 @@ cpu_master_loop:
8308f9c9
MT
66718 }
66719
66720 kgdb_restore:
66721- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66722- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66723+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66724+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66725 if (kgdb_info[sstep_cpu].task)
66726 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66727 else
5e856224 66728@@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
8308f9c9
MT
66729 static void kgdb_tasklet_bpt(unsigned long ing)
66730 {
66731 kgdb_breakpoint();
66732- atomic_set(&kgdb_break_tasklet_var, 0);
66733+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66734 }
66735
66736 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66737
66738 void kgdb_schedule_breakpoint(void)
66739 {
66740- if (atomic_read(&kgdb_break_tasklet_var) ||
66741+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66742 atomic_read(&kgdb_active) != -1 ||
66743 atomic_read(&kgdb_setting_breakpoint))
66744 return;
66745- atomic_inc(&kgdb_break_tasklet_var);
66746+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
66747 tasklet_schedule(&kgdb_tasklet_breakpoint);
66748 }
66749 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
fe2de317 66750diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
5e856224 66751index e2ae734..08a4c5c 100644
fe2de317
MT
66752--- a/kernel/debug/kdb/kdb_main.c
66753+++ b/kernel/debug/kdb/kdb_main.c
66754@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
57199397
MT
66755 list_for_each_entry(mod, kdb_modules, list) {
66756
66757 kdb_printf("%-20s%8u 0x%p ", mod->name,
66758- mod->core_size, (void *)mod);
66759+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
66760 #ifdef CONFIG_MODULE_UNLOAD
5e856224 66761 kdb_printf("%4ld ", module_refcount(mod));
57199397 66762 #endif
fe2de317 66763@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
57199397
MT
66764 kdb_printf(" (Loading)");
66765 else
66766 kdb_printf(" (Live)");
66767- kdb_printf(" 0x%p", mod->module_core);
66768+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
66769
66770 #ifdef CONFIG_MODULE_UNLOAD
66771 {
fe2de317 66772diff --git a/kernel/events/core.c b/kernel/events/core.c
5e856224 66773index 1b5c081..c375f83 100644
fe2de317
MT
66774--- a/kernel/events/core.c
66775+++ b/kernel/events/core.c
4c928ab7 66776@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
15a11c5b
MT
66777 return 0;
66778 }
66779
66780-static atomic64_t perf_event_id;
66781+static atomic64_unchecked_t perf_event_id;
66782
66783 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
66784 enum event_type_t event_type);
5e856224 66785@@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
15a11c5b
MT
66786
66787 static inline u64 perf_event_count(struct perf_event *event)
66788 {
66789- return local64_read(&event->count) + atomic64_read(&event->child_count);
66790+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
66791 }
66792
66793 static u64 perf_event_read(struct perf_event *event)
5e856224 66794@@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
15a11c5b
MT
66795 mutex_lock(&event->child_mutex);
66796 total += perf_event_read(event);
66797 *enabled += event->total_time_enabled +
66798- atomic64_read(&event->child_total_time_enabled);
66799+ atomic64_read_unchecked(&event->child_total_time_enabled);
66800 *running += event->total_time_running +
66801- atomic64_read(&event->child_total_time_running);
66802+ atomic64_read_unchecked(&event->child_total_time_running);
66803
66804 list_for_each_entry(child, &event->child_list, child_list) {
66805 total += perf_event_read(child);
5e856224 66806@@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
15a11c5b
MT
66807 userpg->offset -= local64_read(&event->hw.prev_count);
66808
6e9df6a3 66809 userpg->time_enabled = enabled +
15a11c5b
MT
66810- atomic64_read(&event->child_total_time_enabled);
66811+ atomic64_read_unchecked(&event->child_total_time_enabled);
66812
6e9df6a3 66813 userpg->time_running = running +
15a11c5b
MT
66814- atomic64_read(&event->child_total_time_running);
66815+ atomic64_read_unchecked(&event->child_total_time_running);
66816
66817 barrier();
66818 ++userpg->lock;
5e856224 66819@@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
15a11c5b
MT
66820 values[n++] = perf_event_count(event);
66821 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
66822 values[n++] = enabled +
66823- atomic64_read(&event->child_total_time_enabled);
66824+ atomic64_read_unchecked(&event->child_total_time_enabled);
66825 }
66826 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
66827 values[n++] = running +
66828- atomic64_read(&event->child_total_time_running);
66829+ atomic64_read_unchecked(&event->child_total_time_running);
66830 }
66831 if (read_format & PERF_FORMAT_ID)
66832 values[n++] = primary_event_id(event);
5e856224 66833@@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
15a11c5b
MT
66834 * need to add enough zero bytes after the string to handle
66835 * the 64bit alignment we do later.
66836 */
66837- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
66838+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
66839 if (!buf) {
66840 name = strncpy(tmp, "//enomem", sizeof(tmp));
66841 goto got_name;
66842 }
66843- name = d_path(&file->f_path, buf, PATH_MAX);
66844+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
66845 if (IS_ERR(name)) {
66846 name = strncpy(tmp, "//toolong", sizeof(tmp));
66847 goto got_name;
5e856224 66848@@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
15a11c5b
MT
66849 event->parent = parent_event;
66850
66851 event->ns = get_pid_ns(current->nsproxy->pid_ns);
66852- event->id = atomic64_inc_return(&perf_event_id);
66853+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
66854
66855 event->state = PERF_EVENT_STATE_INACTIVE;
66856
5e856224 66857@@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
15a11c5b
MT
66858 /*
66859 * Add back the child's count to the parent's count:
66860 */
66861- atomic64_add(child_val, &parent_event->child_count);
66862- atomic64_add(child_event->total_time_enabled,
66863+ atomic64_add_unchecked(child_val, &parent_event->child_count);
66864+ atomic64_add_unchecked(child_event->total_time_enabled,
66865 &parent_event->child_total_time_enabled);
66866- atomic64_add(child_event->total_time_running,
66867+ atomic64_add_unchecked(child_event->total_time_running,
66868 &parent_event->child_total_time_running);
66869
66870 /*
fe2de317 66871diff --git a/kernel/exit.c b/kernel/exit.c
5e856224 66872index 46c8b14..d868958 100644
fe2de317
MT
66873--- a/kernel/exit.c
66874+++ b/kernel/exit.c
5e856224 66875@@ -58,6 +58,10 @@
57199397 66876 #include <asm/pgtable.h>
58c5fc13 66877 #include <asm/mmu_context.h>
58c5fc13
MT
66878
66879+#ifdef CONFIG_GRKERNSEC
66880+extern rwlock_t grsec_exec_file_lock;
66881+#endif
66882+
66883 static void exit_mm(struct task_struct * tsk);
66884
57199397 66885 static void __unhash_process(struct task_struct *p, bool group_dead)
5e856224 66886@@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
58c5fc13
MT
66887 struct task_struct *leader;
66888 int zap_leader;
66889 repeat:
15a11c5b 66890+#ifdef CONFIG_NET
58c5fc13 66891+ gr_del_task_from_ip_table(p);
15a11c5b 66892+#endif
58c5fc13 66893+
58c5fc13 66894 /* don't need to get the RCU readlock here - the process is dead and
df50ba0c 66895 * can't be modifying its own credentials. But shut RCU-lockdep up */
6e9df6a3 66896 rcu_read_lock();
5e856224 66897@@ -381,7 +389,7 @@ int allow_signal(int sig)
ae4e228f
MT
66898 * know it'll be handled, so that they don't get converted to
66899 * SIGKILL or just silently dropped.
66900 */
66901- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66902+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66903 recalc_sigpending();
66904 spin_unlock_irq(&current->sighand->siglock);
66905 return 0;
5e856224 66906@@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
58c5fc13
MT
66907 vsnprintf(current->comm, sizeof(current->comm), name, args);
66908 va_end(args);
66909
66910+#ifdef CONFIG_GRKERNSEC
66911+ write_lock(&grsec_exec_file_lock);
66912+ if (current->exec_file) {
66913+ fput(current->exec_file);
66914+ current->exec_file = NULL;
66915+ }
66916+ write_unlock(&grsec_exec_file_lock);
66917+#endif
66918+
66919+ gr_set_kernel_label(current);
66920+
66921 /*
66922 * If we were started as result of loading a module, close all of the
66923 * user space pages. We don't need them, and if we didn't close them
5e856224 66924@@ -873,6 +892,8 @@ void do_exit(long code)
bc901d79
MT
66925 struct task_struct *tsk = current;
66926 int group_dead;
66927
6e9df6a3
MT
66928+ set_fs(USER_DS);
66929+
66930 profile_task_exit(tsk);
bc901d79 66931
6e9df6a3 66932 WARN_ON(blk_needs_flush_plug(tsk));
5e856224 66933@@ -889,7 +910,6 @@ void do_exit(long code)
6e9df6a3
MT
66934 * mm_release()->clear_child_tid() from writing to a user-controlled
66935 * kernel address.
bc901d79 66936 */
6e9df6a3 66937- set_fs(USER_DS);
bc901d79 66938
6e9df6a3 66939 ptrace_event(PTRACE_EVENT_EXIT, code);
bc901d79 66940
5e856224 66941@@ -950,6 +970,9 @@ void do_exit(long code)
58c5fc13
MT
66942 tsk->exit_code = code;
66943 taskstats_exit(tsk, group_dead);
66944
66945+ gr_acl_handle_psacct(tsk, code);
66946+ gr_acl_handle_exit();
66947+
66948 exit_mm(tsk);
66949
66950 if (group_dead)
5e856224 66951@@ -1066,7 +1089,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
4c928ab7
MT
66952 * Take down every thread in the group. This is called by fatal signals
66953 * as well as by sys_exit_group (below).
66954 */
5e856224 66955-void
4c928ab7
MT
66956+__noreturn void
66957 do_group_exit(int exit_code)
66958 {
66959 struct signal_struct *sig = current->signal;
fe2de317 66960diff --git a/kernel/fork.c b/kernel/fork.c
5e856224 66961index 26a7a67..a1053f9 100644
fe2de317
MT
66962--- a/kernel/fork.c
66963+++ b/kernel/fork.c
5e856224 66964@@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
58c5fc13
MT
66965 *stackend = STACK_END_MAGIC; /* for overflow detection */
66966
66967 #ifdef CONFIG_CC_STACKPROTECTOR
66968- tsk->stack_canary = get_random_int();
66969+ tsk->stack_canary = pax_get_random_long();
66970 #endif
66971
6e9df6a3 66972 /*
5e856224 66973@@ -308,13 +308,77 @@ out:
57199397
MT
66974 }
66975
66976 #ifdef CONFIG_MMU
66977+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66978+{
66979+ struct vm_area_struct *tmp;
66980+ unsigned long charge;
66981+ struct mempolicy *pol;
66982+ struct file *file;
66983+
66984+ charge = 0;
66985+ if (mpnt->vm_flags & VM_ACCOUNT) {
66986+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66987+ if (security_vm_enough_memory(len))
66988+ goto fail_nomem;
66989+ charge = len;
66990+ }
66991+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66992+ if (!tmp)
66993+ goto fail_nomem;
66994+ *tmp = *mpnt;
66995+ tmp->vm_mm = mm;
66996+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66997+ pol = mpol_dup(vma_policy(mpnt));
66998+ if (IS_ERR(pol))
66999+ goto fail_nomem_policy;
67000+ vma_set_policy(tmp, pol);
67001+ if (anon_vma_fork(tmp, mpnt))
67002+ goto fail_nomem_anon_vma_fork;
67003+ tmp->vm_flags &= ~VM_LOCKED;
6892158b 67004+ tmp->vm_next = tmp->vm_prev = NULL;
57199397
MT
67005+ tmp->vm_mirror = NULL;
67006+ file = tmp->vm_file;
67007+ if (file) {
67008+ struct inode *inode = file->f_path.dentry->d_inode;
67009+ struct address_space *mapping = file->f_mapping;
67010+
67011+ get_file(file);
67012+ if (tmp->vm_flags & VM_DENYWRITE)
67013+ atomic_dec(&inode->i_writecount);
15a11c5b 67014+ mutex_lock(&mapping->i_mmap_mutex);
57199397
MT
67015+ if (tmp->vm_flags & VM_SHARED)
67016+ mapping->i_mmap_writable++;
57199397
MT
67017+ flush_dcache_mmap_lock(mapping);
67018+ /* insert tmp into the share list, just after mpnt */
67019+ vma_prio_tree_add(tmp, mpnt);
67020+ flush_dcache_mmap_unlock(mapping);
15a11c5b 67021+ mutex_unlock(&mapping->i_mmap_mutex);
57199397
MT
67022+ }
67023+
67024+ /*
67025+ * Clear hugetlb-related page reserves for children. This only
67026+ * affects MAP_PRIVATE mappings. Faults generated by the child
67027+ * are not guaranteed to succeed, even if read-only
67028+ */
67029+ if (is_vm_hugetlb_page(tmp))
67030+ reset_vma_resv_huge_pages(tmp);
67031+
67032+ return tmp;
67033+
67034+fail_nomem_anon_vma_fork:
67035+ mpol_put(pol);
67036+fail_nomem_policy:
67037+ kmem_cache_free(vm_area_cachep, tmp);
67038+fail_nomem:
67039+ vm_unacct_memory(charge);
67040+ return NULL;
67041+}
67042+
67043 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67044 {
67045 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
67046 struct rb_node **rb_link, *rb_parent;
67047 int retval;
67048- unsigned long charge;
67049- struct mempolicy *pol;
67050
67051 down_write(&oldmm->mmap_sem);
67052 flush_cache_dup_mm(oldmm);
5e856224 67053@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
58c5fc13
MT
67054 mm->locked_vm = 0;
67055 mm->mmap = NULL;
67056 mm->mmap_cache = NULL;
67057- mm->free_area_cache = oldmm->mmap_base;
67058- mm->cached_hole_size = ~0UL;
67059+ mm->free_area_cache = oldmm->free_area_cache;
67060+ mm->cached_hole_size = oldmm->cached_hole_size;
67061 mm->map_count = 0;
67062 cpumask_clear(mm_cpumask(mm));
67063 mm->mm_rb = RB_ROOT;
5e856224 67064@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
57199397
MT
67065
67066 prev = NULL;
67067 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
67068- struct file *file;
67069-
67070 if (mpnt->vm_flags & VM_DONTCOPY) {
67071 long pages = vma_pages(mpnt);
67072 mm->total_vm -= pages;
5e856224 67073@@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
57199397
MT
67074 -pages);
67075 continue;
67076 }
67077- charge = 0;
67078- if (mpnt->vm_flags & VM_ACCOUNT) {
67079- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67080- if (security_vm_enough_memory(len))
67081- goto fail_nomem;
67082- charge = len;
fe2de317
MT
67083+ tmp = dup_vma(mm, mpnt);
67084+ if (!tmp) {
67085+ retval = -ENOMEM;
67086+ goto out;
67087 }
57199397
MT
67088- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67089- if (!tmp)
67090- goto fail_nomem;
67091- *tmp = *mpnt;
67092- INIT_LIST_HEAD(&tmp->anon_vma_chain);
67093- pol = mpol_dup(vma_policy(mpnt));
67094- retval = PTR_ERR(pol);
67095- if (IS_ERR(pol))
67096- goto fail_nomem_policy;
67097- vma_set_policy(tmp, pol);
6892158b 67098- tmp->vm_mm = mm;
57199397
MT
67099- if (anon_vma_fork(tmp, mpnt))
67100- goto fail_nomem_anon_vma_fork;
67101- tmp->vm_flags &= ~VM_LOCKED;
57199397
MT
67102- tmp->vm_next = tmp->vm_prev = NULL;
67103- file = tmp->vm_file;
67104- if (file) {
67105- struct inode *inode = file->f_path.dentry->d_inode;
67106- struct address_space *mapping = file->f_mapping;
67107-
67108- get_file(file);
67109- if (tmp->vm_flags & VM_DENYWRITE)
67110- atomic_dec(&inode->i_writecount);
15a11c5b 67111- mutex_lock(&mapping->i_mmap_mutex);
57199397
MT
67112- if (tmp->vm_flags & VM_SHARED)
67113- mapping->i_mmap_writable++;
57199397
MT
67114- flush_dcache_mmap_lock(mapping);
67115- /* insert tmp into the share list, just after mpnt */
67116- vma_prio_tree_add(tmp, mpnt);
67117- flush_dcache_mmap_unlock(mapping);
15a11c5b 67118- mutex_unlock(&mapping->i_mmap_mutex);
fe2de317
MT
67119- }
67120-
67121- /*
57199397
MT
67122- * Clear hugetlb-related page reserves for children. This only
67123- * affects MAP_PRIVATE mappings. Faults generated by the child
67124- * are not guaranteed to succeed, even if read-only
67125- */
67126- if (is_vm_hugetlb_page(tmp))
67127- reset_vma_resv_huge_pages(tmp);
fe2de317
MT
67128
67129 /*
57199397 67130 * Link in the new vma and copy the page table entries.
5e856224 67131@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
58c5fc13
MT
67132 if (retval)
67133 goto out;
67134 }
67135+
67136+#ifdef CONFIG_PAX_SEGMEXEC
67137+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
67138+ struct vm_area_struct *mpnt_m;
67139+
67140+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
67141+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
67142+
67143+ if (!mpnt->vm_mirror)
67144+ continue;
67145+
67146+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
67147+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
67148+ mpnt->vm_mirror = mpnt_m;
67149+ } else {
67150+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
67151+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
67152+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
67153+ mpnt->vm_mirror->vm_mirror = mpnt;
67154+ }
67155+ }
67156+ BUG_ON(mpnt_m);
67157+ }
67158+#endif
67159+
67160 /* a new mm has just been created */
67161 arch_dup_mmap(oldmm, mm);
67162 retval = 0;
5e856224 67163@@ -429,14 +474,6 @@ out:
57199397
MT
67164 flush_tlb_mm(oldmm);
67165 up_write(&oldmm->mmap_sem);
67166 return retval;
67167-fail_nomem_anon_vma_fork:
67168- mpol_put(pol);
67169-fail_nomem_policy:
67170- kmem_cache_free(vm_area_cachep, tmp);
67171-fail_nomem:
67172- retval = -ENOMEM;
67173- vm_unacct_memory(charge);
67174- goto out;
67175 }
67176
6e9df6a3 67177 static inline int mm_alloc_pgd(struct mm_struct *mm)
5e856224
MT
67178@@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
67179 return ERR_PTR(err);
4c928ab7 67180
5e856224
MT
67181 mm = get_task_mm(task);
67182- if (mm && mm != current->mm &&
67183- !ptrace_may_access(task, mode)) {
4c928ab7 67184+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
5e856224
MT
67185+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
67186 mmput(mm);
67187 mm = ERR_PTR(-EACCES);
67188 }
67189@@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
6892158b 67190 spin_unlock(&fs->lock);
58c5fc13
MT
67191 return -EAGAIN;
67192 }
67193- fs->users++;
67194+ atomic_inc(&fs->users);
6892158b 67195 spin_unlock(&fs->lock);
58c5fc13
MT
67196 return 0;
67197 }
df50ba0c
MT
67198 tsk->fs = copy_fs_struct(fs);
67199 if (!tsk->fs)
67200 return -ENOMEM;
67201+ gr_set_chroot_entries(tsk, &tsk->fs->root);
67202 return 0;
67203 }
67204
5e856224 67205@@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
58c5fc13
MT
67206 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
67207 #endif
67208 retval = -EAGAIN;
67209+
67210+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
67211+
67212 if (atomic_read(&p->real_cred->user->processes) >=
df50ba0c 67213 task_rlimit(p, RLIMIT_NPROC)) {
6e9df6a3 67214 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
5e856224 67215@@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
15a11c5b
MT
67216 if (clone_flags & CLONE_THREAD)
67217 p->tgid = current->tgid;
58c5fc13
MT
67218
67219+ gr_copy_label(p);
67220+
67221 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
67222 /*
67223 * Clear TID on mm_release()?
5e856224 67224@@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
58c5fc13
MT
67225 bad_fork_free:
67226 free_task(p);
67227 fork_out:
67228+ gr_log_forkfail(retval);
67229+
67230 return ERR_PTR(retval);
67231 }
67232
5e856224 67233@@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
58c5fc13
MT
67234 if (clone_flags & CLONE_PARENT_SETTID)
67235 put_user(nr, parent_tidptr);
67236
67237+ gr_handle_brute_check();
67238+
67239 if (clone_flags & CLONE_VFORK) {
67240 p->vfork_done = &vfork;
67241 init_completion(&vfork);
5e856224 67242@@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
58c5fc13
MT
67243 return 0;
67244
67245 /* don't need lock here; in the worst case we'll do useless copy */
67246- if (fs->users == 1)
67247+ if (atomic_read(&fs->users) == 1)
67248 return 0;
67249
67250 *new_fsp = copy_fs_struct(fs);
5e856224 67251@@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
58c5fc13 67252 fs = current->fs;
6892158b 67253 spin_lock(&fs->lock);
58c5fc13
MT
67254 current->fs = new_fs;
67255- if (--fs->users)
df50ba0c 67256+ gr_set_chroot_entries(current, &current->fs->root);
58c5fc13
MT
67257+ if (atomic_dec_return(&fs->users))
67258 new_fs = NULL;
67259 else
67260 new_fs = fs;
fe2de317 67261diff --git a/kernel/futex.c b/kernel/futex.c
5e856224 67262index 866c9d5..5c5f828 100644
fe2de317
MT
67263--- a/kernel/futex.c
67264+++ b/kernel/futex.c
ae4e228f
MT
67265@@ -54,6 +54,7 @@
67266 #include <linux/mount.h>
67267 #include <linux/pagemap.h>
67268 #include <linux/syscalls.h>
67269+#include <linux/ptrace.h>
67270 #include <linux/signal.h>
4c928ab7 67271 #include <linux/export.h>
ae4e228f 67272 #include <linux/magic.h>
5e856224 67273@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
16454cff 67274 struct page *page, *page_head;
15a11c5b 67275 int err, ro = 0;
58c5fc13
MT
67276
67277+#ifdef CONFIG_PAX_SEGMEXEC
67278+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67279+ return -EFAULT;
67280+#endif
67281+
67282 /*
67283 * The futex address must be "naturally" aligned.
67284 */
5e856224 67285@@ -2721,6 +2727,7 @@ static int __init futex_init(void)
58c5fc13 67286 {
bc901d79
MT
67287 u32 curval;
67288 int i;
67289+ mm_segment_t oldfs;
58c5fc13 67290
bc901d79
MT
67291 /*
67292 * This will fail and we want it. Some arch implementations do
5e856224 67293@@ -2732,8 +2739,11 @@ static int __init futex_init(void)
bc901d79
MT
67294 * implementation, the non-functional ones will return
67295 * -ENOSYS.
67296 */
67297+ oldfs = get_fs();
67298+ set_fs(USER_DS);
66a7e928 67299 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
bc901d79 67300 futex_cmpxchg_enabled = 1;
66a7e928 67301+ set_fs(oldfs);
bc901d79 67302
66a7e928 67303 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
6e9df6a3 67304 plist_head_init(&futex_queues[i].chain);
fe2de317
MT
67305diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
67306index 9b22d03..6295b62 100644
67307--- a/kernel/gcov/base.c
67308+++ b/kernel/gcov/base.c
58c5fc13
MT
67309@@ -102,11 +102,6 @@ void gcov_enable_events(void)
67310 }
67311
67312 #ifdef CONFIG_MODULES
67313-static inline int within(void *addr, void *start, unsigned long size)
67314-{
67315- return ((addr >= start) && (addr < start + size));
67316-}
67317-
67318 /* Update list and generate events when modules are unloaded. */
67319 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67320 void *data)
fe2de317 67321@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
58c5fc13
MT
67322 prev = NULL;
67323 /* Remove entries located in module from linked list. */
67324 for (info = gcov_info_head; info; info = info->next) {
67325- if (within(info, mod->module_core, mod->core_size)) {
67326+ if (within_module_core_rw((unsigned long)info, mod)) {
67327 if (prev)
67328 prev->next = info->next;
67329 else
fe2de317 67330diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
4c928ab7 67331index ae34bf5..4e2f3d0 100644
fe2de317
MT
67332--- a/kernel/hrtimer.c
67333+++ b/kernel/hrtimer.c
67334@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
ae4e228f
MT
67335 local_irq_restore(flags);
67336 }
67337
67338-static void run_hrtimer_softirq(struct softirq_action *h)
67339+static void run_hrtimer_softirq(void)
67340 {
67341 hrtimer_peek_ahead_timers();
67342 }
fe2de317 67343diff --git a/kernel/jump_label.c b/kernel/jump_label.c
5e856224 67344index 01d3b70..9e4d098 100644
fe2de317
MT
67345--- a/kernel/jump_label.c
67346+++ b/kernel/jump_label.c
67347@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
15a11c5b
MT
67348
67349 size = (((unsigned long)stop - (unsigned long)start)
67350 / sizeof(struct jump_entry));
67351+ pax_open_kernel();
67352 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67353+ pax_close_kernel();
bc901d79
MT
67354 }
67355
15a11c5b 67356 static void jump_label_update(struct jump_label_key *key, int enable);
5e856224 67357@@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
15a11c5b
MT
67358 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67359 struct jump_entry *iter;
67360
bc901d79 67361+ pax_open_kernel();
15a11c5b
MT
67362 for (iter = iter_start; iter < iter_stop; iter++) {
67363 if (within_module_init(iter->code, mod))
67364 iter->code = 0;
67365 }
bc901d79 67366+ pax_close_kernel();
15a11c5b 67367 }
bc901d79 67368
15a11c5b 67369 static int
fe2de317
MT
67370diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67371index 079f1d3..a407562 100644
67372--- a/kernel/kallsyms.c
67373+++ b/kernel/kallsyms.c
58c5fc13
MT
67374@@ -11,6 +11,9 @@
67375 * Changed the compression method from stem compression to "table lookup"
67376 * compression (see scripts/kallsyms.c for a more complete description)
67377 */
67378+#ifdef CONFIG_GRKERNSEC_HIDESYM
67379+#define __INCLUDED_BY_HIDESYM 1
67380+#endif
67381 #include <linux/kallsyms.h>
67382 #include <linux/module.h>
67383 #include <linux/init.h>
fe2de317 67384@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
58c5fc13
MT
67385
67386 static inline int is_kernel_inittext(unsigned long addr)
67387 {
67388+ if (system_state != SYSTEM_BOOTING)
67389+ return 0;
67390+
67391 if (addr >= (unsigned long)_sinittext
67392 && addr <= (unsigned long)_einittext)
67393 return 1;
57199397
MT
67394 return 0;
67395 }
58c5fc13 67396
ae4e228f 67397+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
df50ba0c 67398+#ifdef CONFIG_MODULES
57199397
MT
67399+static inline int is_module_text(unsigned long addr)
67400+{
67401+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67402+ return 1;
67403+
67404+ addr = ktla_ktva(addr);
67405+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67406+}
67407+#else
67408+static inline int is_module_text(unsigned long addr)
67409+{
67410+ return 0;
67411+}
67412+#endif
df50ba0c 67413+#endif
58c5fc13 67414+
57199397
MT
67415 static inline int is_kernel_text(unsigned long addr)
67416 {
67417 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
fe2de317 67418@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
57199397
MT
67419
67420 static inline int is_kernel(unsigned long addr)
67421 {
ae4e228f 67422+
57199397
MT
67423+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67424+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
58c5fc13 67425+ return 1;
ae4e228f 67426+
57199397
MT
67427+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67428+#else
67429 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
ae4e228f 67430+#endif
58c5fc13 67431+
58c5fc13 67432 return 1;
66a7e928 67433 return in_gate_area_no_mm(addr);
57199397
MT
67434 }
67435
67436 static int is_ksym_addr(unsigned long addr)
67437 {
67438+
67439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67440+ if (is_module_text(addr))
67441+ return 0;
67442+#endif
67443+
67444 if (all_var)
67445 return is_kernel(addr);
67446
fe2de317 67447@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
58c5fc13
MT
67448
67449 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67450 {
67451- iter->name[0] = '\0';
67452 iter->nameoff = get_symbol_offset(new_pos);
67453 iter->pos = new_pos;
67454 }
fe2de317 67455@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
ae4e228f
MT
67456 {
67457 struct kallsym_iter *iter = m->private;
67458
67459+#ifdef CONFIG_GRKERNSEC_HIDESYM
67460+ if (current_uid())
67461+ return 0;
67462+#endif
67463+
67464 /* Some debugging symbols have no name. Ignore them. */
67465 if (!iter->name[0])
67466 return 0;
fe2de317 67467@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
58c5fc13
MT
67468 struct kallsym_iter *iter;
67469 int ret;
67470
67471- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67472+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67473 if (!iter)
67474 return -ENOMEM;
67475 reset_iter(iter, 0);
fe2de317 67476diff --git a/kernel/kexec.c b/kernel/kexec.c
5e856224 67477index 7b08867..3bac516 100644
fe2de317
MT
67478--- a/kernel/kexec.c
67479+++ b/kernel/kexec.c
5e856224 67480@@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
6e9df6a3
MT
67481 unsigned long flags)
67482 {
67483 struct compat_kexec_segment in;
67484- struct kexec_segment out, __user *ksegments;
67485+ struct kexec_segment out;
67486+ struct kexec_segment __user *ksegments;
67487 unsigned long i, result;
67488
67489 /* Don't allow clients that don't understand the native
fe2de317 67490diff --git a/kernel/kmod.c b/kernel/kmod.c
5e856224 67491index a3a46cb..f2e42f8 100644
fe2de317
MT
67492--- a/kernel/kmod.c
67493+++ b/kernel/kmod.c
5e856224 67494@@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
71d190be
MT
67495 * If module auto-loading support is disabled then this function
67496 * becomes a no-operation.
67497 */
67498-int __request_module(bool wait, const char *fmt, ...)
67499+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67500 {
67501- va_list args;
67502 char module_name[MODULE_NAME_LEN];
67503 unsigned int max_modprobes;
67504 int ret;
67505- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
67506+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
67507 static char *envp[] = { "HOME=/",
67508 "TERM=linux",
67509 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
5e856224 67510@@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
71d190be
MT
67511 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67512 static int kmod_loop_msg;
67513
67514- va_start(args, fmt);
67515- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67516- va_end(args);
67517+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67518 if (ret >= MODULE_NAME_LEN)
67519 return -ENAMETOOLONG;
67520
5e856224 67521@@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
ae4e228f
MT
67522 if (ret)
67523 return ret;
58c5fc13
MT
67524
67525+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71d190be
MT
67526+ if (!current_uid()) {
67527+ /* hack to workaround consolekit/udisks stupidity */
67528+ read_lock(&tasklist_lock);
67529+ if (!strcmp(current->comm, "mount") &&
67530+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67531+ read_unlock(&tasklist_lock);
67532+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67533+ return -EPERM;
67534+ }
67535+ read_unlock(&tasklist_lock);
58c5fc13
MT
67536+ }
67537+#endif
67538+
67539 /* If modprobe needs a service that is in a module, we get a recursive
67540 * loop. Limit the number of running kmod threads to max_threads/2 or
67541 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
5e856224 67542@@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
71d190be
MT
67543 atomic_dec(&kmod_concurrent);
67544 return ret;
67545 }
67546+
67547+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67548+{
67549+ va_list args;
67550+ int ret;
67551+
67552+ va_start(args, fmt);
67553+ ret = ____request_module(wait, module_param, fmt, args);
67554+ va_end(args);
67555+
67556+ return ret;
67557+}
67558+
67559+int __request_module(bool wait, const char *fmt, ...)
67560+{
67561+ va_list args;
67562+ int ret;
67563+
67564+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67565+ if (current_uid()) {
67566+ char module_param[MODULE_NAME_LEN];
67567+
67568+ memset(module_param, 0, sizeof(module_param));
67569+
67570+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67571+
67572+ va_start(args, fmt);
67573+ ret = ____request_module(wait, module_param, fmt, args);
67574+ va_end(args);
67575+
67576+ return ret;
67577+ }
67578+#endif
67579+
67580+ va_start(args, fmt);
67581+ ret = ____request_module(wait, NULL, fmt, args);
67582+ va_end(args);
67583+
67584+ return ret;
67585+}
67586+
67587 EXPORT_SYMBOL(__request_module);
67588 #endif /* CONFIG_MODULES */
67589
5e856224 67590@@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
6e9df6a3
MT
67591 *
67592 * Thus the __user pointer cast is valid here.
67593 */
67594- sys_wait4(pid, (int __user *)&ret, 0, NULL);
67595+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67596
67597 /*
67598 * If ret is 0, either ____call_usermodehelper failed and the
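
The MODHARDEN hunks above pass one extra argv entry to modprobe: for a non-root caller, __request_module() builds a "grsec_modharden_normal<uid>_" marker and hands it through as module_param, and load_module() later finds that marker in mod->args to log the requesting uid and refuse the load. A minimal standalone sketch of that build-and-parse round trip follows; it is not part of the patch, and all names in it are illustrative only.

/* Sketch of the MODHARDEN marker round trip: build the tag the way the
 * patched __request_module() does, then recover the uid the way the
 * patched load_module() does with strstr()/strlen().  Hypothetical
 * userspace demo only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODULE_NAME_LEN 64

/* Build "grsec_modharden_normal<uid>_" into buf, as the patched
 * __request_module() does for non-root callers. */
static void build_tag(char *buf, size_t len, unsigned int uid)
{
        memset(buf, 0, len);
        snprintf(buf, len - 1, "grsec_modharden_normal%u_", uid);
}

/* Recover the uid from an argument string carrying the tag, mirroring
 * the strstr()-based scan in the patched load_module(). */
static int parse_tag(const char *args, unsigned int *uid)
{
        const char *p = strstr(args, "grsec_modharden_normal");
        char *end;

        if (!p)
                return -1;
        p += strlen("grsec_modharden_normal");
        *uid = (unsigned int)strtoul(p, &end, 10);
        return (*end == '_') ? 0 : -1;
}

int main(void)
{
        char tag[MODULE_NAME_LEN];
        unsigned int uid;

        build_tag(tag, sizeof(tag), 1000);
        if (parse_tag(tag, &uid) == 0)
                printf("auto-load request tagged with uid %u\n", uid);
        return 0;
}

In the patch itself the tag travels as one element of modprobe's argv and comes back via mod->args, so the parsing side only has to trust its own marker format, never the caller.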
fe2de317 67599diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5e856224 67600index c62b854..cb67968 100644
fe2de317
MT
67601--- a/kernel/kprobes.c
67602+++ b/kernel/kprobes.c
67603@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
58c5fc13
MT
67604 * kernel image and loaded module images reside. This is required
67605 * so x86_64 can correctly handle the %rip-relative fixups.
67606 */
67607- kip->insns = module_alloc(PAGE_SIZE);
67608+ kip->insns = module_alloc_exec(PAGE_SIZE);
67609 if (!kip->insns) {
67610 kfree(kip);
67611 return NULL;
fe2de317 67612@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
ae4e228f 67613 */
df50ba0c 67614 if (!list_is_singular(&kip->list)) {
ae4e228f 67615 list_del(&kip->list);
58c5fc13
MT
67616- module_free(NULL, kip->insns);
67617+ module_free_exec(NULL, kip->insns);
67618 kfree(kip);
67619 }
67620 return 1;
4c928ab7 67621@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
df50ba0c
MT
67622 {
67623 int i, err = 0;
67624 unsigned long offset = 0, size = 0;
67625- char *modname, namebuf[128];
67626+ char *modname, namebuf[KSYM_NAME_LEN];
67627 const char *symbol_name;
67628 void *addr;
67629 struct kprobe_blackpoint *kb;
4c928ab7 67630@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
df50ba0c
MT
67631 const char *sym = NULL;
67632 unsigned int i = *(loff_t *) v;
67633 unsigned long offset = 0;
67634- char *modname, namebuf[128];
67635+ char *modname, namebuf[KSYM_NAME_LEN];
67636
67637 head = &kprobe_table[i];
67638 preempt_disable();
5e856224
MT
67639diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
67640index 4e316e1..5501eef 100644
67641--- a/kernel/ksysfs.c
67642+++ b/kernel/ksysfs.c
67643@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
67644 {
67645 if (count+1 > UEVENT_HELPER_PATH_LEN)
67646 return -ENOENT;
67647+ if (!capable(CAP_SYS_ADMIN))
67648+ return -EPERM;
67649 memcpy(uevent_helper, buf, count);
67650 uevent_helper[count] = '\0';
67651 if (count && uevent_helper[count-1] == '\n')
fe2de317 67652diff --git a/kernel/lockdep.c b/kernel/lockdep.c
5e856224 67653index 8889f7d..95319b7 100644
fe2de317
MT
67654--- a/kernel/lockdep.c
67655+++ b/kernel/lockdep.c
5e856224 67656@@ -590,6 +590,10 @@ static int static_obj(void *obj)
df50ba0c
MT
67657 end = (unsigned long) &_end,
67658 addr = (unsigned long) obj;
58c5fc13
MT
67659
67660+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 67661+ start = ktla_ktva(start);
58c5fc13
MT
67662+#endif
67663+
67664 /*
67665 * static variable?
67666 */
5e856224 67667@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
ae4e228f
MT
67668 if (!static_obj(lock->key)) {
67669 debug_locks_off();
67670 printk("INFO: trying to register non-static key.\n");
67671+ printk("lock:%pS key:%pS.\n", lock, lock->key);
67672 printk("the code is fine but needs lockdep annotation.\n");
67673 printk("turning off the locking correctness validator.\n");
67674 dump_stack();
4c928ab7 67675@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
bc901d79
MT
67676 if (!class)
67677 return 0;
67678 }
67679- atomic_inc((atomic_t *)&class->ops);
67680+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
67681 if (very_verbose(class)) {
67682 printk("\nacquire class [%p] %s", class->key, class->name);
67683 if (class->name_version > 1)
fe2de317 67684diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
4c928ab7 67685index 91c32a0..b2c71c5 100644
fe2de317
MT
67686--- a/kernel/lockdep_proc.c
67687+++ b/kernel/lockdep_proc.c
67688@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
df50ba0c
MT
67689
67690 static void print_name(struct seq_file *m, struct lock_class *class)
67691 {
67692- char str[128];
67693+ char str[KSYM_NAME_LEN];
67694 const char *name = class->name;
67695
67696 if (!name) {
fe2de317 67697diff --git a/kernel/module.c b/kernel/module.c
5e856224 67698index 3d56b6f..2a22bd0 100644
fe2de317
MT
67699--- a/kernel/module.c
67700+++ b/kernel/module.c
15a11c5b 67701@@ -58,6 +58,7 @@
71d190be
MT
67702 #include <linux/jump_label.h>
67703 #include <linux/pfn.h>
15a11c5b 67704 #include <linux/bsearch.h>
71d190be
MT
67705+#include <linux/grsecurity.h>
67706
67707 #define CREATE_TRACE_POINTS
67708 #include <trace/events/module.h>
5e856224 67709@@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
58c5fc13 67710
57199397
MT
67711 /* Bounds of module allocation, for speeding __module_address.
67712 * Protected by module_mutex. */
58c5fc13
MT
67713-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67714+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67715+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67716
67717 int register_module_notifier(struct notifier_block * nb)
67718 {
5e856224 67719@@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
58c5fc13
MT
67720 return true;
67721
67722 list_for_each_entry_rcu(mod, &modules, list) {
67723- struct symsearch arr[] = {
67724+ struct symsearch modarr[] = {
67725 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67726 NOT_GPL_ONLY, false },
67727 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
5e856224 67728@@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
58c5fc13
MT
67729 #endif
67730 };
67731
67732- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67733+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67734 return true;
67735 }
67736 return false;
5e856224 67737@@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
df50ba0c
MT
67738 static int percpu_modalloc(struct module *mod,
67739 unsigned long size, unsigned long align)
ae4e228f 67740 {
58c5fc13
MT
67741- if (align > PAGE_SIZE) {
67742+ if (align-1 >= PAGE_SIZE) {
67743 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
df50ba0c 67744 mod->name, align, PAGE_SIZE);
58c5fc13 67745 align = PAGE_SIZE;
5e856224
MT
67746@@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
67747 static ssize_t show_coresize(struct module_attribute *mattr,
67748 struct module_kobject *mk, char *buffer)
67749 {
67750- return sprintf(buffer, "%u\n", mk->mod->core_size);
67751+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
67752 }
67753
67754 static struct module_attribute modinfo_coresize =
67755@@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
67756 static ssize_t show_initsize(struct module_attribute *mattr,
67757 struct module_kobject *mk, char *buffer)
67758 {
67759- return sprintf(buffer, "%u\n", mk->mod->init_size);
67760+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
67761 }
67762
67763 static struct module_attribute modinfo_initsize =
67764@@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
c52201e0
MT
67765 */
67766 #ifdef CONFIG_SYSFS
67767
67768-#ifdef CONFIG_KALLSYMS
67769+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67770 static inline bool sect_empty(const Elf_Shdr *sect)
67771 {
67772 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
5e856224 67773@@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
16454cff 67774
15a11c5b
MT
67775 static void unset_module_core_ro_nx(struct module *mod)
67776 {
67777- set_page_attributes(mod->module_core + mod->core_text_size,
67778- mod->module_core + mod->core_size,
67779+ set_page_attributes(mod->module_core_rw,
67780+ mod->module_core_rw + mod->core_size_rw,
67781 set_memory_x);
67782- set_page_attributes(mod->module_core,
67783- mod->module_core + mod->core_ro_size,
67784+ set_page_attributes(mod->module_core_rx,
67785+ mod->module_core_rx + mod->core_size_rx,
67786 set_memory_rw);
67787 }
16454cff 67788
15a11c5b
MT
67789 static void unset_module_init_ro_nx(struct module *mod)
67790 {
67791- set_page_attributes(mod->module_init + mod->init_text_size,
67792- mod->module_init + mod->init_size,
67793+ set_page_attributes(mod->module_init_rw,
67794+ mod->module_init_rw + mod->init_size_rw,
67795 set_memory_x);
67796- set_page_attributes(mod->module_init,
67797- mod->module_init + mod->init_ro_size,
67798+ set_page_attributes(mod->module_init_rx,
67799+ mod->module_init_rx + mod->init_size_rx,
67800 set_memory_rw);
16454cff
MT
67801 }
67802
5e856224 67803@@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
16454cff
MT
67804
67805 mutex_lock(&module_mutex);
67806 list_for_each_entry_rcu(mod, &modules, list) {
67807- if ((mod->module_core) && (mod->core_text_size)) {
67808- set_page_attributes(mod->module_core,
67809- mod->module_core + mod->core_text_size,
67810+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67811+ set_page_attributes(mod->module_core_rx,
67812+ mod->module_core_rx + mod->core_size_rx,
67813 set_memory_rw);
67814 }
67815- if ((mod->module_init) && (mod->init_text_size)) {
67816- set_page_attributes(mod->module_init,
67817- mod->module_init + mod->init_text_size,
67818+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67819+ set_page_attributes(mod->module_init_rx,
67820+ mod->module_init_rx + mod->init_size_rx,
67821 set_memory_rw);
67822 }
67823 }
5e856224 67824@@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
16454cff
MT
67825
67826 mutex_lock(&module_mutex);
67827 list_for_each_entry_rcu(mod, &modules, list) {
67828- if ((mod->module_core) && (mod->core_text_size)) {
67829- set_page_attributes(mod->module_core,
67830- mod->module_core + mod->core_text_size,
67831+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67832+ set_page_attributes(mod->module_core_rx,
67833+ mod->module_core_rx + mod->core_size_rx,
67834 set_memory_ro);
67835 }
67836- if ((mod->module_init) && (mod->init_text_size)) {
67837- set_page_attributes(mod->module_init,
67838- mod->module_init + mod->init_text_size,
67839+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67840+ set_page_attributes(mod->module_init_rx,
67841+ mod->module_init_rx + mod->init_size_rx,
67842 set_memory_ro);
67843 }
67844 }
5e856224 67845@@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
58c5fc13
MT
67846
67847 /* This may be NULL, but that's OK */
15a11c5b 67848 unset_module_init_ro_nx(mod);
58c5fc13
MT
67849- module_free(mod, mod->module_init);
67850+ module_free(mod, mod->module_init_rw);
67851+ module_free_exec(mod, mod->module_init_rx);
67852 kfree(mod->args);
df50ba0c 67853 percpu_modfree(mod);
6892158b 67854
58c5fc13
MT
67855 /* Free lock-classes: */
67856- lockdep_free_key_range(mod->module_core, mod->core_size);
67857+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67858+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67859
67860 /* Finally, free the core (containing the module structure) */
15a11c5b 67861 unset_module_core_ro_nx(mod);
58c5fc13
MT
67862- module_free(mod, mod->module_core);
67863+ module_free_exec(mod, mod->module_core_rx);
67864+ module_free(mod, mod->module_core_rw);
58c5fc13 67865
ae4e228f
MT
67866 #ifdef CONFIG_MPU
67867 update_protections(current->mm);
5e856224 67868@@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
71d190be
MT
67869 unsigned int i;
67870 int ret = 0;
67871 const struct kernel_symbol *ksym;
67872+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67873+ int is_fs_load = 0;
67874+ int register_filesystem_found = 0;
8308f9c9 67875+ char *p;
71d190be 67876+
8308f9c9
MT
67877+ p = strstr(mod->args, "grsec_modharden_fs");
67878+ if (p) {
67879+ char *endptr = p + strlen("grsec_modharden_fs");
67880+ /* copy \0 as well */
67881+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71d190be 67882+ is_fs_load = 1;
8308f9c9 67883+ }
71d190be
MT
67884+#endif
67885
67886 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67887 const char *name = info->strtab + sym[i].st_name;
67888
67889+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67890+ /* it's a real shame this will never get ripped and copied
67891+ upstream! ;(
67892+ */
67893+ if (is_fs_load && !strcmp(name, "register_filesystem"))
67894+ register_filesystem_found = 1;
67895+#endif
67896+
67897 switch (sym[i].st_shndx) {
67898 case SHN_COMMON:
67899 /* We compiled with -fno-common. These are not
5e856224 67900@@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
6892158b 67901 ksym = resolve_symbol_wait(mod, info, name);
58c5fc13 67902 /* Ok if resolved. */
57199397 67903 if (ksym && !IS_ERR(ksym)) {
ae4e228f 67904+ pax_open_kernel();
58c5fc13 67905 sym[i].st_value = ksym->value;
ae4e228f 67906+ pax_close_kernel();
58c5fc13
MT
67907 break;
67908 }
67909
5e856224 67910@@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
df50ba0c 67911 secbase = (unsigned long)mod_percpu(mod);
58c5fc13 67912 else
6892158b 67913 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
ae4e228f 67914+ pax_open_kernel();
58c5fc13 67915 sym[i].st_value += secbase;
ae4e228f 67916+ pax_close_kernel();
58c5fc13
MT
67917 break;
67918 }
67919 }
71d190be
MT
67920
67921+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67922+ if (is_fs_load && !register_filesystem_found) {
67923+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67924+ ret = -EPERM;
67925+ }
67926+#endif
67927+
67928 return ret;
67929 }
67930
5e856224 67931@@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 67932 || s->sh_entsize != ~0UL
6892158b 67933 || strstarts(sname, ".init"))
58c5fc13
MT
67934 continue;
67935- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67936+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67937+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67938+ else
67939+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
5e856224 67940 pr_debug("\t%s\n", sname);
58c5fc13 67941 }
16454cff
MT
67942- switch (m) {
67943- case 0: /* executable */
67944- mod->core_size = debug_align(mod->core_size);
58c5fc13 67945- mod->core_text_size = mod->core_size;
16454cff
MT
67946- break;
67947- case 1: /* RO: text and ro-data */
67948- mod->core_size = debug_align(mod->core_size);
67949- mod->core_ro_size = mod->core_size;
67950- break;
67951- case 3: /* whole core */
67952- mod->core_size = debug_align(mod->core_size);
67953- break;
67954- }
58c5fc13
MT
67955 }
67956
5e856224
MT
67957 pr_debug("Init section allocation order:\n");
67958@@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 67959 || s->sh_entsize != ~0UL
6892158b 67960 || !strstarts(sname, ".init"))
58c5fc13
MT
67961 continue;
67962- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67963- | INIT_OFFSET_MASK);
67964+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67965+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67966+ else
67967+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67968+ s->sh_entsize |= INIT_OFFSET_MASK;
5e856224 67969 pr_debug("\t%s\n", sname);
58c5fc13 67970 }
16454cff
MT
67971- switch (m) {
67972- case 0: /* executable */
67973- mod->init_size = debug_align(mod->init_size);
58c5fc13 67974- mod->init_text_size = mod->init_size;
16454cff
MT
67975- break;
67976- case 1: /* RO: text and ro-data */
67977- mod->init_size = debug_align(mod->init_size);
67978- mod->init_ro_size = mod->init_size;
67979- break;
67980- case 3: /* whole init */
67981- mod->init_size = debug_align(mod->init_size);
67982- break;
67983- }
58c5fc13
MT
67984 }
67985 }
67986
5e856224 67987@@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
58c5fc13 67988
ae4e228f
MT
67989 /* Put symbol section at end of init part of module. */
67990 symsect->sh_flags |= SHF_ALLOC;
67991- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67992+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
6892158b 67993 info->index.sym) | INIT_OFFSET_MASK;
5e856224 67994 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
ae4e228f 67995
5e856224 67996@@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
ae4e228f
MT
67997 }
67998
67999 /* Append room for core symbols at end of core part. */
6892158b 68000- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
5e856224
MT
68001- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
68002- mod->core_size += strtab_size;
6892158b 68003+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
5e856224
MT
68004+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
68005+ mod->core_size_rx += strtab_size;
ae4e228f
MT
68006
68007 /* Put string table section at end of init part of module. */
68008 strsect->sh_flags |= SHF_ALLOC;
68009- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
68010+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
6892158b 68011 info->index.str) | INIT_OFFSET_MASK;
5e856224 68012 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
ae4e228f 68013 }
5e856224 68014@@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
6892158b
MT
68015 /* Make sure we get permanent strtab: don't use info->strtab. */
68016 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
58c5fc13 68017
ae4e228f
MT
68018+ pax_open_kernel();
68019+
58c5fc13 68020 /* Set types up while we still have access to sections. */
ae4e228f 68021 for (i = 0; i < mod->num_symtab; i++)
6892158b 68022 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
ae4e228f 68023
6892158b 68024- mod->core_symtab = dst = mod->module_core + info->symoffs;
5e856224 68025- mod->core_strtab = s = mod->module_core + info->stroffs;
6892158b 68026+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
5e856224 68027+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
ae4e228f
MT
68028 src = mod->symtab;
68029 *dst = *src;
5e856224
MT
68030 *s++ = 0;
68031@@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68032 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
ae4e228f
MT
68033 }
68034 mod->core_num_syms = ndst;
58c5fc13 68035+
ae4e228f 68036+ pax_close_kernel();
58c5fc13
MT
68037 }
68038 #else
6892158b 68039 static inline void layout_symtab(struct module *mod, struct load_info *info)
5e856224 68040@@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
6e9df6a3 68041 return size == 0 ? NULL : vmalloc_exec(size);
58c5fc13
MT
68042 }
68043
68044-static void *module_alloc_update_bounds(unsigned long size)
68045+static void *module_alloc_update_bounds_rw(unsigned long size)
68046 {
68047 void *ret = module_alloc(size);
68048
68049 if (ret) {
57199397 68050 mutex_lock(&module_mutex);
58c5fc13
MT
68051 /* Update module bounds. */
68052- if ((unsigned long)ret < module_addr_min)
68053- module_addr_min = (unsigned long)ret;
68054- if ((unsigned long)ret + size > module_addr_max)
68055- module_addr_max = (unsigned long)ret + size;
68056+ if ((unsigned long)ret < module_addr_min_rw)
68057+ module_addr_min_rw = (unsigned long)ret;
68058+ if ((unsigned long)ret + size > module_addr_max_rw)
68059+ module_addr_max_rw = (unsigned long)ret + size;
57199397 68060+ mutex_unlock(&module_mutex);
58c5fc13
MT
68061+ }
68062+ return ret;
68063+}
68064+
68065+static void *module_alloc_update_bounds_rx(unsigned long size)
68066+{
68067+ void *ret = module_alloc_exec(size);
68068+
68069+ if (ret) {
57199397 68070+ mutex_lock(&module_mutex);
58c5fc13
MT
68071+ /* Update module bounds. */
68072+ if ((unsigned long)ret < module_addr_min_rx)
68073+ module_addr_min_rx = (unsigned long)ret;
68074+ if ((unsigned long)ret + size > module_addr_max_rx)
68075+ module_addr_max_rx = (unsigned long)ret + size;
57199397 68076 mutex_unlock(&module_mutex);
58c5fc13
MT
68077 }
68078 return ret;
5e856224 68079@@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
fe2de317
MT
68080 static int check_modinfo(struct module *mod, struct load_info *info)
68081 {
68082 const char *modmagic = get_modinfo(info, "vermagic");
68083+ const char *license = get_modinfo(info, "license");
68084 int err;
68085
68086+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
68087+ if (!license || !license_is_gpl_compatible(license))
68088+ return -ENOEXEC;
68089+#endif
68090+
68091 /* This is allowed: modprobe --force will invalidate it. */
68092 if (!modmagic) {
68093 err = try_to_force_load(mod, "bad vermagic");
5e856224 68094@@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
fe2de317
MT
68095 }
68096
68097 /* Set up license info based on the info section */
68098- set_license(mod, get_modinfo(info, "license"));
68099+ set_license(mod, license);
68100
68101 return 0;
68102 }
5e856224 68103@@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 68104 void *ptr;
58c5fc13
MT
68105
68106 /* Do the allocs. */
68107- ptr = module_alloc_update_bounds(mod->core_size);
68108+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
68109 /*
68110 * The pointer to this block is stored in the module structure
68111 * which is inside the block. Just mark it as not being a
5e856224 68112@@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b
MT
68113 if (!ptr)
68114 return -ENOMEM;
68115
58c5fc13
MT
68116- memset(ptr, 0, mod->core_size);
68117- mod->module_core = ptr;
68118+ memset(ptr, 0, mod->core_size_rw);
68119+ mod->module_core_rw = ptr;
68120
68121- ptr = module_alloc_update_bounds(mod->init_size);
68122+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68123 /*
68124 * The pointer to this block is stored in the module structure
68125 * which is inside the block. This block doesn't need to be
68126 * scanned as it contains data and code that will be freed
68127 * after the module is initialized.
68128 */
68129- kmemleak_ignore(ptr);
68130- if (!ptr && mod->init_size) {
6892158b 68131- module_free(mod, mod->module_core);
58c5fc13
MT
68132+ kmemleak_not_leak(ptr);
68133+ if (!ptr && mod->init_size_rw) {
6892158b 68134+ module_free(mod, mod->module_core_rw);
16454cff
MT
68135 return -ENOMEM;
68136 }
68137- memset(ptr, 0, mod->init_size);
68138- mod->module_init = ptr;
58c5fc13
MT
68139+ memset(ptr, 0, mod->init_size_rw);
68140+ mod->module_init_rw = ptr;
68141+
68142+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68143+ kmemleak_not_leak(ptr);
68144+ if (!ptr) {
6892158b
MT
68145+ module_free(mod, mod->module_init_rw);
68146+ module_free(mod, mod->module_core_rw);
c52201e0
MT
68147+ return -ENOMEM;
68148+ }
58c5fc13 68149+
ae4e228f 68150+ pax_open_kernel();
58c5fc13 68151+ memset(ptr, 0, mod->core_size_rx);
ae4e228f 68152+ pax_close_kernel();
58c5fc13
MT
68153+ mod->module_core_rx = ptr;
68154+
68155+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68156+ kmemleak_not_leak(ptr);
68157+ if (!ptr && mod->init_size_rx) {
6892158b
MT
68158+ module_free_exec(mod, mod->module_core_rx);
68159+ module_free(mod, mod->module_init_rw);
68160+ module_free(mod, mod->module_core_rw);
16454cff
MT
68161+ return -ENOMEM;
68162+ }
58c5fc13 68163+
ae4e228f 68164+ pax_open_kernel();
58c5fc13 68165+ memset(ptr, 0, mod->init_size_rx);
ae4e228f 68166+ pax_close_kernel();
58c5fc13
MT
68167+ mod->module_init_rx = ptr;
68168
68169 /* Transfer each section which specifies SHF_ALLOC */
5e856224
MT
68170 pr_debug("final section addresses:\n");
68171@@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 68172 if (!(shdr->sh_flags & SHF_ALLOC))
58c5fc13
MT
68173 continue;
68174
6892158b 68175- if (shdr->sh_entsize & INIT_OFFSET_MASK)
58c5fc13 68176- dest = mod->module_init
6892158b 68177- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 68178- else
6892158b
MT
68179- dest = mod->module_core + shdr->sh_entsize;
68180+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
68181+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58c5fc13 68182+ dest = mod->module_init_rw
6892158b 68183+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13
MT
68184+ else
68185+ dest = mod->module_init_rx
6892158b 68186+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 68187+ } else {
6892158b
MT
68188+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68189+ dest = mod->module_core_rw + shdr->sh_entsize;
58c5fc13 68190+ else
6892158b 68191+ dest = mod->module_core_rx + shdr->sh_entsize;
58c5fc13
MT
68192+ }
68193+
6892158b
MT
68194+ if (shdr->sh_type != SHT_NOBITS) {
68195+
58c5fc13 68196+#ifdef CONFIG_PAX_KERNEXEC
bc901d79
MT
68197+#ifdef CONFIG_X86_64
68198+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
68199+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68200+#endif
6892158b 68201+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
ae4e228f 68202+ pax_open_kernel();
6892158b 68203+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
ae4e228f 68204+ pax_close_kernel();
58c5fc13
MT
68205+ } else
68206+#endif
6892158b
MT
68207
68208- if (shdr->sh_type != SHT_NOBITS)
68209 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58c5fc13
MT
68210+ }
68211 /* Update sh_addr to point to copy in image. */
6892158b 68212- shdr->sh_addr = (unsigned long)dest;
58c5fc13
MT
68213+
68214+#ifdef CONFIG_PAX_KERNEXEC
6892158b
MT
68215+ if (shdr->sh_flags & SHF_EXECINSTR)
68216+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
58c5fc13
MT
68217+ else
68218+#endif
68219+
6892158b 68220+ shdr->sh_addr = (unsigned long)dest;
5e856224
MT
68221 pr_debug("\t0x%lx %s\n",
68222 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
58c5fc13 68223 }
5e856224 68224@@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
58c5fc13
MT
68225 * Do it before processing of module parameters, so the module
68226 * can provide parameter accessor functions of its own.
68227 */
68228- if (mod->module_init)
68229- flush_icache_range((unsigned long)mod->module_init,
68230- (unsigned long)mod->module_init
68231- + mod->init_size);
68232- flush_icache_range((unsigned long)mod->module_core,
68233- (unsigned long)mod->module_core + mod->core_size);
68234+ if (mod->module_init_rx)
68235+ flush_icache_range((unsigned long)mod->module_init_rx,
68236+ (unsigned long)mod->module_init_rx
68237+ + mod->init_size_rx);
68238+ flush_icache_range((unsigned long)mod->module_core_rx,
68239+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
68240
68241 set_fs(old_fs);
6892158b 68242 }
5e856224
MT
68243@@ -2802,8 +2901,10 @@ out:
68244 static void module_deallocate(struct module *mod, struct load_info *info)
6892158b 68245 {
6892158b 68246 percpu_modfree(mod);
58c5fc13 68247- module_free(mod, mod->module_init);
58c5fc13
MT
68248- module_free(mod, mod->module_core);
68249+ module_free_exec(mod, mod->module_init_rx);
58c5fc13 68250+ module_free_exec(mod, mod->module_core_rx);
58c5fc13 68251+ module_free(mod, mod->module_init_rw);
58c5fc13 68252+ module_free(mod, mod->module_core_rw);
6892158b
MT
68253 }
68254
6e9df6a3 68255 int __weak module_finalize(const Elf_Ehdr *hdr,
5e856224 68256@@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
71d190be
MT
68257 if (err)
68258 goto free_unload;
68259
68260+ /* Now copy in args */
68261+ mod->args = strndup_user(uargs, ~0UL >> 1);
68262+ if (IS_ERR(mod->args)) {
68263+ err = PTR_ERR(mod->args);
68264+ goto free_unload;
68265+ }
68266+
68267 /* Set up MODINFO_ATTR fields */
68268 setup_modinfo(mod, &info);
68269
68270+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68271+ {
68272+ char *p, *p2;
68273+
68274+ if (strstr(mod->args, "grsec_modharden_netdev")) {
68275+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68276+ err = -EPERM;
68277+ goto free_modinfo;
68278+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68279+ p += strlen("grsec_modharden_normal");
68280+ p2 = strstr(p, "_");
68281+ if (p2) {
68282+ *p2 = '\0';
68283+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68284+ *p2 = '_';
68285+ }
68286+ err = -EPERM;
68287+ goto free_modinfo;
68288+ }
68289+ }
68290+#endif
68291+
68292 /* Fix up syms, so that st_value is a pointer to location. */
68293 err = simplify_symbols(mod, &info);
68294 if (err < 0)
5e856224 68295@@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
71d190be
MT
68296
68297 flush_module_icache(mod);
68298
68299- /* Now copy in args */
68300- mod->args = strndup_user(uargs, ~0UL >> 1);
68301- if (IS_ERR(mod->args)) {
68302- err = PTR_ERR(mod->args);
68303- goto free_arch_cleanup;
68304- }
68305-
68306 /* Mark state as coming so strong_try_module_get() ignores us. */
68307 mod->state = MODULE_STATE_COMING;
68308
5e856224 68309@@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
71d190be
MT
68310 unlock:
68311 mutex_unlock(&module_mutex);
68312 synchronize_sched();
68313- kfree(mod->args);
68314- free_arch_cleanup:
68315 module_arch_cleanup(mod);
68316 free_modinfo:
68317 free_modinfo(mod);
68318+ kfree(mod->args);
68319 free_unload:
68320 module_unload_free(mod);
68321 free_module:
5e856224 68322@@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
16454cff
MT
68323 MODULE_STATE_COMING, mod);
68324
68325 /* Set RO and NX regions for core */
68326- set_section_ro_nx(mod->module_core,
68327- mod->core_text_size,
68328- mod->core_ro_size,
68329- mod->core_size);
68330+ set_section_ro_nx(mod->module_core_rx,
68331+ mod->core_size_rx,
68332+ mod->core_size_rx,
68333+ mod->core_size_rx);
68334
68335 /* Set RO and NX regions for init */
68336- set_section_ro_nx(mod->module_init,
68337- mod->init_text_size,
68338- mod->init_ro_size,
68339- mod->init_size);
68340+ set_section_ro_nx(mod->module_init_rx,
68341+ mod->init_size_rx,
68342+ mod->init_size_rx,
68343+ mod->init_size_rx);
68344
68345 do_mod_ctors(mod);
68346 /* Start the module */
5e856224 68347@@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
ae4e228f
MT
68348 mod->strtab = mod->core_strtab;
68349 #endif
15a11c5b 68350 unset_module_init_ro_nx(mod);
58c5fc13
MT
68351- module_free(mod, mod->module_init);
68352- mod->module_init = NULL;
68353- mod->init_size = 0;
15a11c5b 68354- mod->init_ro_size = 0;
58c5fc13
MT
68355- mod->init_text_size = 0;
68356+ module_free(mod, mod->module_init_rw);
68357+ module_free_exec(mod, mod->module_init_rx);
68358+ mod->module_init_rw = NULL;
68359+ mod->module_init_rx = NULL;
68360+ mod->init_size_rw = 0;
68361+ mod->init_size_rx = 0;
68362 mutex_unlock(&module_mutex);
68363
68364 return 0;
5e856224 68365@@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
58c5fc13
MT
68366 unsigned long nextval;
68367
68368 /* At worse, next value is at end of module */
68369- if (within_module_init(addr, mod))
68370- nextval = (unsigned long)mod->module_init+mod->init_text_size;
68371+ if (within_module_init_rx(addr, mod))
68372+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68373+ else if (within_module_init_rw(addr, mod))
68374+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68375+ else if (within_module_core_rx(addr, mod))
68376+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68377+ else if (within_module_core_rw(addr, mod))
68378+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68379 else
68380- nextval = (unsigned long)mod->module_core+mod->core_text_size;
68381+ return NULL;
68382
66a7e928 68383 /* Scan for closest preceding symbol, and next symbol. (ELF
58c5fc13 68384 starts real symbols at 1). */
5e856224 68385@@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
68386 char buf[8];
68387
68388 seq_printf(m, "%s %u",
68389- mod->name, mod->init_size + mod->core_size);
68390+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68391 print_unload_info(m, mod);
68392
68393 /* Informative for users. */
5e856224 68394@@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
68395 mod->state == MODULE_STATE_COMING ? "Loading":
68396 "Live");
68397 /* Used by oprofile and other similar tools. */
66a7e928
MT
68398- seq_printf(m, " 0x%pK", mod->module_core);
68399+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58c5fc13
MT
68400
68401 /* Taints info */
68402 if (mod->taints)
5e856224 68403@@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
58c5fc13
MT
68404
68405 static int __init proc_modules_init(void)
68406 {
68407+#ifndef CONFIG_GRKERNSEC_HIDESYM
68408+#ifdef CONFIG_GRKERNSEC_PROC_USER
68409+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68410+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68411+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68412+#else
68413 proc_create("modules", 0, NULL, &proc_modules_operations);
68414+#endif
68415+#else
68416+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68417+#endif
68418 return 0;
68419 }
68420 module_init(proc_modules_init);
5e856224 68421@@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
58c5fc13
MT
68422 {
68423 struct module *mod;
68424
68425- if (addr < module_addr_min || addr > module_addr_max)
68426+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68427+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
68428 return NULL;
68429
68430 list_for_each_entry_rcu(mod, &modules, list)
68431- if (within_module_core(addr, mod)
68432- || within_module_init(addr, mod))
68433+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
68434 return mod;
68435 return NULL;
68436 }
5e856224 68437@@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
58c5fc13
MT
68438 */
68439 struct module *__module_text_address(unsigned long addr)
68440 {
68441- struct module *mod = __module_address(addr);
68442+ struct module *mod;
68443+
68444+#ifdef CONFIG_X86_32
68445+ addr = ktla_ktva(addr);
68446+#endif
68447+
68448+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68449+ return NULL;
68450+
68451+ mod = __module_address(addr);
68452+
68453 if (mod) {
68454 /* Make sure it's within the text section. */
68455- if (!within(addr, mod->module_init, mod->init_text_size)
68456- && !within(addr, mod->module_core, mod->core_text_size))
68457+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68458 mod = NULL;
68459 }
68460 return mod;
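
The layout_sections() and move_module() hunks above replace upstream's text/ro/whole-core accounting with a two-way split: a section that is SHF_WRITE, or not SHF_ALLOC at all, is accounted to the writable (_rw) block, and everything else to the executable (_rx) block. Below is a minimal sketch of that classification rule, using stand-in types rather than the real Elf_Shdr and get_offset(); it only illustrates the predicate used by the patch.

/* Sketch of the RW/RX split used by the patched layout_sections():
 * a section lands in the RW region when it is writable or not
 * allocated, otherwise in the RX region.  Stand-in types only. */
#include <stdio.h>

#define SHF_WRITE 0x1
#define SHF_ALLOC 0x2

struct section {
        const char *name;
        unsigned long sh_flags;
        unsigned long sh_size;
};

static int is_rw(unsigned long sh_flags)
{
        /* Same predicate as the patch:
         * (sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC) */
        return (sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC);
}

int main(void)
{
        struct section secs[] = {
                { ".text",   SHF_ALLOC,             4096 },
                { ".rodata", SHF_ALLOC,             1024 },
                { ".data",   SHF_ALLOC | SHF_WRITE, 2048 },
                { ".bss",    SHF_ALLOC | SHF_WRITE,  512 },
        };
        unsigned long core_size_rw = 0, core_size_rx = 0;
        unsigned long i;

        for (i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
                if (is_rw(secs[i].sh_flags))
                        core_size_rw += secs[i].sh_size;
                else
                        core_size_rx += secs[i].sh_size;
                printf("%-8s -> %s\n", secs[i].name,
                       is_rw(secs[i].sh_flags) ? "rw" : "rx");
        }
        printf("core_size_rw=%lu core_size_rx=%lu\n",
               core_size_rw, core_size_rx);
        return 0;
}

Under this rule read-only data shares the RX mapping with text, which is why the move_module() hunks wrap the memset()/memcpy() of that region in pax_open_kernel()/pax_close_kernel().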
fe2de317 68461diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
4c928ab7 68462index 7e3443f..b2a1e6b 100644
fe2de317
MT
68463--- a/kernel/mutex-debug.c
68464+++ b/kernel/mutex-debug.c
68465@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
71d190be
MT
68466 }
68467
68468 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68469- struct thread_info *ti)
68470+ struct task_struct *task)
68471 {
68472 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68473
68474 /* Mark the current thread as blocked on the lock: */
68475- ti->task->blocked_on = waiter;
68476+ task->blocked_on = waiter;
68477 }
68478
68479 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68480- struct thread_info *ti)
68481+ struct task_struct *task)
68482 {
68483 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68484- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68485- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68486- ti->task->blocked_on = NULL;
68487+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66a7e928 68488+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
71d190be
MT
68489+ task->blocked_on = NULL;
68490
68491 list_del_init(&waiter->list);
68492 waiter->task = NULL;
fe2de317
MT
68493diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68494index 0799fd3..d06ae3b 100644
68495--- a/kernel/mutex-debug.h
68496+++ b/kernel/mutex-debug.h
68497@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
71d190be
MT
68498 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68499 extern void debug_mutex_add_waiter(struct mutex *lock,
68500 struct mutex_waiter *waiter,
68501- struct thread_info *ti);
68502+ struct task_struct *task);
68503 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68504- struct thread_info *ti);
68505+ struct task_struct *task);
68506 extern void debug_mutex_unlock(struct mutex *lock);
68507 extern void debug_mutex_init(struct mutex *lock, const char *name,
68508 struct lock_class_key *key);
fe2de317 68509diff --git a/kernel/mutex.c b/kernel/mutex.c
4c928ab7 68510index 89096dd..f91ebc5 100644
fe2de317
MT
68511--- a/kernel/mutex.c
68512+++ b/kernel/mutex.c
68513@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68514 spin_lock_mutex(&lock->wait_lock, flags);
68515
68516 debug_mutex_lock_common(lock, &waiter);
68517- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68518+ debug_mutex_add_waiter(lock, &waiter, task);
68519
68520 /* add waiting tasks to the end of the waitqueue (FIFO): */
68521 list_add_tail(&waiter.list, &lock->wait_list);
68522@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68523 * TASK_UNINTERRUPTIBLE case.)
68524 */
68525 if (unlikely(signal_pending_state(state, task))) {
68526- mutex_remove_waiter(lock, &waiter,
68527- task_thread_info(task));
68528+ mutex_remove_waiter(lock, &waiter, task);
68529 mutex_release(&lock->dep_map, 1, ip);
68530 spin_unlock_mutex(&lock->wait_lock, flags);
68531
68532@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68533 done:
68534 lock_acquired(&lock->dep_map, ip);
68535 /* got the lock - rejoice! */
68536- mutex_remove_waiter(lock, &waiter, current_thread_info());
68537+ mutex_remove_waiter(lock, &waiter, task);
68538 mutex_set_owner(lock);
68539
68540 /* set it to 0 if there are no waiters left: */
68541diff --git a/kernel/padata.c b/kernel/padata.c
4c928ab7 68542index b452599..5d68f4e 100644
fe2de317
MT
68543--- a/kernel/padata.c
68544+++ b/kernel/padata.c
68545@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
8308f9c9
MT
68546 padata->pd = pd;
68547 padata->cb_cpu = cb_cpu;
68548
68549- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
68550- atomic_set(&pd->seq_nr, -1);
68551+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
68552+ atomic_set_unchecked(&pd->seq_nr, -1);
68553
68554- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
68555+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
68556
68557 target_cpu = padata_cpu_hash(padata);
68558 queue = per_cpu_ptr(pd->pqueue, target_cpu);
fe2de317 68559@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
8308f9c9
MT
68560 padata_init_pqueues(pd);
68561 padata_init_squeues(pd);
68562 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
68563- atomic_set(&pd->seq_nr, -1);
68564+ atomic_set_unchecked(&pd->seq_nr, -1);
68565 atomic_set(&pd->reorder_objects, 0);
68566 atomic_set(&pd->refcnt, 0);
68567 pd->pinst = pinst;
fe2de317 68568diff --git a/kernel/panic.c b/kernel/panic.c
5e856224 68569index 8ed89a1..e83856a 100644
fe2de317
MT
68570--- a/kernel/panic.c
68571+++ b/kernel/panic.c
5e856224 68572@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
bc901d79
MT
68573 const char *board;
68574
68575 printk(KERN_WARNING "------------[ cut here ]------------\n");
68576- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68577+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68578 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68579 if (board)
68580 printk(KERN_WARNING "Hardware name: %s\n", board);
5e856224 68581@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58c5fc13
MT
68582 */
68583 void __stack_chk_fail(void)
68584 {
68585- panic("stack-protector: Kernel stack is corrupted in: %p\n",
68586+ dump_stack();
bc901d79 68587+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58c5fc13
MT
68588 __builtin_return_address(0));
68589 }
68590 EXPORT_SYMBOL(__stack_chk_fail);
fe2de317 68591diff --git a/kernel/pid.c b/kernel/pid.c
5e856224 68592index 9f08dfa..6765c40 100644
fe2de317
MT
68593--- a/kernel/pid.c
68594+++ b/kernel/pid.c
58c5fc13
MT
68595@@ -33,6 +33,7 @@
68596 #include <linux/rculist.h>
68597 #include <linux/bootmem.h>
68598 #include <linux/hash.h>
68599+#include <linux/security.h>
68600 #include <linux/pid_namespace.h>
68601 #include <linux/init_task.h>
68602 #include <linux/syscalls.h>
fe2de317 68603@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
58c5fc13
MT
68604
68605 int pid_max = PID_MAX_DEFAULT;
68606
68607-#define RESERVED_PIDS 300
68608+#define RESERVED_PIDS 500
68609
68610 int pid_max_min = RESERVED_PIDS + 1;
68611 int pid_max_max = PID_MAX_LIMIT;
5e856224 68612@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
58c5fc13
MT
68613 */
68614 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68615 {
58c5fc13 68616+ struct task_struct *task;
bc901d79 68617+
4c928ab7
MT
68618 rcu_lockdep_assert(rcu_read_lock_held(),
68619 "find_task_by_pid_ns() needs rcu_read_lock()"
68620 " protection");
bc901d79 68621- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
4c928ab7 68622+
58c5fc13
MT
68623+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68624+
68625+ if (gr_pid_is_chrooted(task))
68626+ return NULL;
68627+
68628+ return task;
68629 }
68630
68631 struct task_struct *find_task_by_vpid(pid_t vnr)
5e856224 68632@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
15a11c5b
MT
68633 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68634 }
68635
68636+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68637+{
4c928ab7
MT
68638+ rcu_lockdep_assert(rcu_read_lock_held(),
68639+ "find_task_by_pid_ns() needs rcu_read_lock()"
68640+ " protection");
15a11c5b
MT
68641+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68642+}
68643+
68644 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68645 {
68646 struct pid *pid;
fe2de317 68647diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
5e856224 68648index 125cb67..a4d1c30 100644
fe2de317
MT
68649--- a/kernel/posix-cpu-timers.c
68650+++ b/kernel/posix-cpu-timers.c
58c5fc13
MT
68651@@ -6,6 +6,7 @@
68652 #include <linux/posix-timers.h>
68653 #include <linux/errno.h>
68654 #include <linux/math64.h>
68655+#include <linux/security.h>
68656 #include <asm/uaccess.h>
68657 #include <linux/kernel_stat.h>
ae4e228f 68658 #include <trace/events/timer.h>
5e856224 68659@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66a7e928
MT
68660
68661 static __init int init_posix_cpu_timers(void)
68662 {
68663- struct k_clock process = {
15a11c5b 68664+ static struct k_clock process = {
66a7e928
MT
68665 .clock_getres = process_cpu_clock_getres,
68666 .clock_get = process_cpu_clock_get,
68667 .timer_create = process_cpu_timer_create,
68668 .nsleep = process_cpu_nsleep,
68669 .nsleep_restart = process_cpu_nsleep_restart,
68670 };
68671- struct k_clock thread = {
15a11c5b 68672+ static struct k_clock thread = {
66a7e928
MT
68673 .clock_getres = thread_cpu_clock_getres,
68674 .clock_get = thread_cpu_clock_get,
68675 .timer_create = thread_cpu_timer_create,
fe2de317 68676diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
4c928ab7 68677index 69185ae..cc2847a 100644
fe2de317
MT
68678--- a/kernel/posix-timers.c
68679+++ b/kernel/posix-timers.c
66a7e928 68680@@ -43,6 +43,7 @@
bc901d79 68681 #include <linux/idr.h>
66a7e928 68682 #include <linux/posix-clock.h>
bc901d79
MT
68683 #include <linux/posix-timers.h>
68684+#include <linux/grsecurity.h>
68685 #include <linux/syscalls.h>
68686 #include <linux/wait.h>
68687 #include <linux/workqueue.h>
15a11c5b
MT
68688@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
68689 * which we beg off on and pass to do_sys_settimeofday().
68690 */
68691
68692-static struct k_clock posix_clocks[MAX_CLOCKS];
68693+static struct k_clock *posix_clocks[MAX_CLOCKS];
68694
68695 /*
68696 * These ones are defined below.
fe2de317 68697@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66a7e928
MT
68698 */
68699 static __init int init_posix_timers(void)
68700 {
68701- struct k_clock clock_realtime = {
15a11c5b 68702+ static struct k_clock clock_realtime = {
66a7e928
MT
68703 .clock_getres = hrtimer_get_res,
68704 .clock_get = posix_clock_realtime_get,
68705 .clock_set = posix_clock_realtime_set,
fe2de317 68706@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66a7e928
MT
68707 .timer_get = common_timer_get,
68708 .timer_del = common_timer_del,
68709 };
68710- struct k_clock clock_monotonic = {
15a11c5b 68711+ static struct k_clock clock_monotonic = {
66a7e928
MT
68712 .clock_getres = hrtimer_get_res,
68713 .clock_get = posix_ktime_get_ts,
68714 .nsleep = common_nsleep,
fe2de317 68715@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66a7e928
MT
68716 .timer_get = common_timer_get,
68717 .timer_del = common_timer_del,
68718 };
68719- struct k_clock clock_monotonic_raw = {
15a11c5b 68720+ static struct k_clock clock_monotonic_raw = {
66a7e928
MT
68721 .clock_getres = hrtimer_get_res,
68722 .clock_get = posix_get_monotonic_raw,
68723 };
68724- struct k_clock clock_realtime_coarse = {
15a11c5b 68725+ static struct k_clock clock_realtime_coarse = {
66a7e928
MT
68726 .clock_getres = posix_get_coarse_res,
68727 .clock_get = posix_get_realtime_coarse,
68728 };
68729- struct k_clock clock_monotonic_coarse = {
15a11c5b 68730+ static struct k_clock clock_monotonic_coarse = {
66a7e928
MT
68731 .clock_getres = posix_get_coarse_res,
68732 .clock_get = posix_get_monotonic_coarse,
68733 };
68734- struct k_clock clock_boottime = {
15a11c5b 68735+ static struct k_clock clock_boottime = {
66a7e928
MT
68736 .clock_getres = hrtimer_get_res,
68737 .clock_get = posix_get_boottime,
68738 .nsleep = common_nsleep,
4c928ab7 68739@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
15a11c5b
MT
68740 return;
68741 }
66a7e928 68742
15a11c5b
MT
68743- posix_clocks[clock_id] = *new_clock;
68744+ posix_clocks[clock_id] = new_clock;
66a7e928 68745 }
15a11c5b 68746 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66a7e928 68747
4c928ab7 68748@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66a7e928 68749 return (id & CLOCKFD_MASK) == CLOCKFD ?
15a11c5b 68750 &clock_posix_dynamic : &clock_posix_cpu;
66a7e928 68751
15a11c5b
MT
68752- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
68753+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
68754 return NULL;
68755- return &posix_clocks[id];
68756+ return posix_clocks[id];
68757 }
66a7e928 68758
15a11c5b 68759 static int common_timer_create(struct k_itimer *new_timer)
4c928ab7 68760@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
bc901d79
MT
68761 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68762 return -EFAULT;
df50ba0c 68763
bc901d79
MT
68764+ /* only the CLOCK_REALTIME clock can be set, all other clocks
68765+ have their clock_set fptr set to a nosettime dummy function
68766+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68767+ call common_clock_set, which calls do_sys_settimeofday, which
68768+ we hook
68769+ */
68770+
66a7e928 68771 return kc->clock_set(which_clock, &new_tp);
bc901d79
MT
68772 }
68773
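
The posix-timers hunks switch posix_clocks[] from an array of struct k_clock, filled by copying the caller's initializer, to an array of pointers that registration stores directly; that is why every initializer above gains a static qualifier and why clockid_to_kclock() must now also check for a NULL slot. A minimal sketch of the pointer-based registration, with illustrative names only:

/* Sketch of the registration change: the table used to hold struct
 * copies; after the patch it holds pointers to objects with static
 * storage duration.  Not the real k_clock API. */
#include <stdio.h>

struct k_clock {
        int (*clock_getres)(void);
};

static int dummy_getres(void) { return 0; }

#define MAX_CLOCKS 16

/* After the patch: a table of pointers. */
static struct k_clock *posix_clocks[MAX_CLOCKS];

/* Registration keeps the caller's pointer, so the object must outlive
 * the call -- hence the 'static' added to every initializer. */
static void register_clock(int id, struct k_clock *clk)
{
        posix_clocks[id] = clk;
}

static struct k_clock *clockid_to_kclock(int id)
{
        if (id < 0 || id >= MAX_CLOCKS ||
            !posix_clocks[id] || !posix_clocks[id]->clock_getres)
                return NULL;
        return posix_clocks[id];
}

int main(void)
{
        static struct k_clock clock_realtime = {
                .clock_getres = dummy_getres,
        };

        register_clock(0, &clock_realtime);
        printf("clock 0 %s\n",
               clockid_to_kclock(0) ? "registered" : "missing");
        return 0;
}

Storing pointers to static objects keeps the clock descriptors out of writable copies, which fits the patch's broader goal of constifying kernel data that never changes after init.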
fe2de317
MT
68774diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
68775index d523593..68197a4 100644
68776--- a/kernel/power/poweroff.c
68777+++ b/kernel/power/poweroff.c
68778@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
58c5fc13
MT
68779 .enable_mask = SYSRQ_ENABLE_BOOT,
68780 };
68781
68782-static int pm_sysrq_init(void)
68783+static int __init pm_sysrq_init(void)
68784 {
68785 register_sysrq_key('o', &sysrq_poweroff_op);
68786 return 0;
fe2de317 68787diff --git a/kernel/power/process.c b/kernel/power/process.c
5e856224 68788index 7aac07a..2d3c6dc 100644
fe2de317
MT
68789--- a/kernel/power/process.c
68790+++ b/kernel/power/process.c
5e856224 68791@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
58c5fc13
MT
68792 u64 elapsed_csecs64;
68793 unsigned int elapsed_csecs;
bc901d79 68794 bool wakeup = false;
58c5fc13
MT
68795+ bool timedout = false;
68796
68797 do_gettimeofday(&start);
68798
5e856224 68799@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
6892158b 68800
ae4e228f 68801 while (true) {
58c5fc13
MT
68802 todo = 0;
68803+ if (time_after(jiffies, end_time))
68804+ timedout = true;
68805 read_lock(&tasklist_lock);
68806 do_each_thread(g, p) {
5e856224
MT
68807 if (p == current || !freeze_task(p))
68808@@ -60,9 +63,13 @@ static int try_to_freeze_tasks(bool user_only)
16454cff
MT
68809 * try_to_stop() after schedule() in ptrace/signal
68810 * stop sees TIF_FREEZE.
58c5fc13
MT
68811 */
68812- if (!task_is_stopped_or_traced(p) &&
68813- !freezer_should_skip(p))
68814+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
68815 todo++;
68816+ if (timedout) {
68817+ printk(KERN_ERR "Task refusing to freeze:\n");
68818+ sched_show_task(p);
68819+ }
68820+ }
68821 } while_each_thread(g, p);
68822 read_unlock(&tasklist_lock);
6892158b 68823
5e856224 68824@@ -71,7 +78,7 @@ static int try_to_freeze_tasks(bool user_only)
6892158b
MT
68825 todo += wq_busy;
68826 }
68827
ae4e228f
MT
68828- if (!todo || time_after(jiffies, end_time))
68829+ if (!todo || timedout)
68830 break;
68831
16454cff 68832 if (pm_wakeup_pending()) {
fe2de317 68833diff --git a/kernel/printk.c b/kernel/printk.c
5e856224 68834index 32690a0..cd7c798 100644
fe2de317
MT
68835--- a/kernel/printk.c
68836+++ b/kernel/printk.c
68837@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
16454cff
MT
68838 if (from_file && type != SYSLOG_ACTION_OPEN)
68839 return 0;
58c5fc13
MT
68840
68841+#ifdef CONFIG_GRKERNSEC_DMESG
16454cff 68842+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58c5fc13
MT
68843+ return -EPERM;
68844+#endif
68845+
16454cff
MT
68846 if (syslog_action_restricted(type)) {
68847 if (capable(CAP_SYSLOG))
68848 return 0;
fe2de317 68849diff --git a/kernel/profile.c b/kernel/profile.c
4c928ab7 68850index 76b8e77..a2930e8 100644
fe2de317
MT
68851--- a/kernel/profile.c
68852+++ b/kernel/profile.c
8308f9c9
MT
68853@@ -39,7 +39,7 @@ struct profile_hit {
68854 /* Oprofile timer tick hook */
68855 static int (*timer_hook)(struct pt_regs *) __read_mostly;
68856
68857-static atomic_t *prof_buffer;
68858+static atomic_unchecked_t *prof_buffer;
68859 static unsigned long prof_len, prof_shift;
68860
68861 int prof_on __read_mostly;
15a11c5b 68862@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
8308f9c9
MT
68863 hits[i].pc = 0;
68864 continue;
68865 }
68866- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68867+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68868 hits[i].hits = hits[i].pc = 0;
68869 }
68870 }
fe2de317 68871@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
8308f9c9
MT
68872 * Add the current hit(s) and flush the write-queue out
68873 * to the global buffer:
68874 */
68875- atomic_add(nr_hits, &prof_buffer[pc]);
68876+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
68877 for (i = 0; i < NR_PROFILE_HIT; ++i) {
68878- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68879+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68880 hits[i].pc = hits[i].hits = 0;
68881 }
68882 out:
fe2de317 68883@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
15a11c5b
MT
68884 {
68885 unsigned long pc;
8308f9c9
MT
68886 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68887- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68888+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68889 }
68890 #endif /* !CONFIG_SMP */
15a11c5b 68891
fe2de317 68892@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
8308f9c9
MT
68893 return -EFAULT;
68894 buf++; p++; count--; read++;
68895 }
68896- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68897+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68898 if (copy_to_user(buf, (void *)pnt, count))
68899 return -EFAULT;
68900 read += count;
fe2de317 68901@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
8308f9c9
MT
68902 }
68903 #endif
68904 profile_discard_flip_buffers();
68905- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68906+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68907 return count;
68908 }
68909
fe2de317 68910diff --git a/kernel/ptrace.c b/kernel/ptrace.c
5e856224 68911index 00ab2ca..d237f61 100644
fe2de317
MT
68912--- a/kernel/ptrace.c
68913+++ b/kernel/ptrace.c
5e856224 68914@@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
58c5fc13 68915 task->ptrace = PT_PTRACED;
6e9df6a3
MT
68916 if (seize)
68917 task->ptrace |= PT_SEIZED;
5e856224
MT
68918- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68919+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
58c5fc13
MT
68920 task->ptrace |= PT_PTRACE_CAP;
68921
68922 __ptrace_link(task, current);
5e856224 68923@@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
ae4e228f
MT
68924 break;
68925 return -EIO;
68926 }
68927- if (copy_to_user(dst, buf, retval))
68928+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68929 return -EFAULT;
68930 copied += retval;
68931 src += retval;
5e856224 68932@@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
6e9df6a3 68933 bool seized = child->ptrace & PT_SEIZED;
bc901d79 68934 int ret = -EIO;
6e9df6a3 68935 siginfo_t siginfo, *si;
bc901d79
MT
68936- void __user *datavp = (void __user *) data;
68937+ void __user *datavp = (__force void __user *) data;
68938 unsigned long __user *datalp = datavp;
6e9df6a3 68939 unsigned long flags;
ae4e228f 68940
5e856224 68941@@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
ae4e228f
MT
68942 goto out;
68943 }
58c5fc13
MT
68944
68945+ if (gr_handle_ptrace(child, request)) {
68946+ ret = -EPERM;
68947+ goto out_put_task_struct;
68948+ }
68949+
6e9df6a3
MT
68950 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68951 ret = ptrace_attach(child, request, data);
ae4e228f
MT
68952 /*
68953 * Some architectures need to do book-keeping after
68954 * a ptrace attach.
68955 */
68956- if (!ret)
68957+ if (!ret) {
68958 arch_ptrace_attach(child);
68959+ gr_audit_ptrace(child);
68960+ }
68961 goto out_put_task_struct;
68962 }
58c5fc13 68963
5e856224 68964@@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
ae4e228f
MT
68965 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68966 if (copied != sizeof(tmp))
68967 return -EIO;
68968- return put_user(tmp, (unsigned long __user *)data);
68969+ return put_user(tmp, (__force unsigned long __user *)data);
58c5fc13
MT
68970 }
68971
bc901d79 68972 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
5e856224 68973@@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
bc901d79
MT
68974 goto out;
68975 }
68976
68977+ if (gr_handle_ptrace(child, request)) {
68978+ ret = -EPERM;
68979+ goto out_put_task_struct;
68980+ }
68981+
6e9df6a3
MT
68982 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68983 ret = ptrace_attach(child, request, data);
bc901d79
MT
68984 /*
68985 * Some architectures need to do book-keeping after
68986 * a ptrace attach.
68987 */
68988- if (!ret)
68989+ if (!ret) {
68990 arch_ptrace_attach(child);
68991+ gr_audit_ptrace(child);
68992+ }
68993 goto out_put_task_struct;
68994 }
68995
4c928ab7 68996diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
5e856224 68997index 977296d..c4744dc 100644
4c928ab7
MT
68998--- a/kernel/rcutiny.c
68999+++ b/kernel/rcutiny.c
69000@@ -46,7 +46,7 @@
69001 struct rcu_ctrlblk;
69002 static void invoke_rcu_callbacks(void);
69003 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
69004-static void rcu_process_callbacks(struct softirq_action *unused);
69005+static void rcu_process_callbacks(void);
69006 static void __call_rcu(struct rcu_head *head,
69007 void (*func)(struct rcu_head *rcu),
69008 struct rcu_ctrlblk *rcp);
5e856224
MT
69009@@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
69010 rcu_is_callbacks_kthread()));
4c928ab7
MT
69011 }
69012
69013-static void rcu_process_callbacks(struct softirq_action *unused)
69014+static void rcu_process_callbacks(void)
69015 {
69016 __rcu_process_callbacks(&rcu_sched_ctrlblk);
69017 __rcu_process_callbacks(&rcu_bh_ctrlblk);
5e856224
MT
69018diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
69019index 9cb1ae4..aac7d3e 100644
69020--- a/kernel/rcutiny_plugin.h
69021+++ b/kernel/rcutiny_plugin.h
69022@@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
69023 have_rcu_kthread_work = morework;
69024 local_irq_restore(flags);
69025 if (work)
69026- rcu_process_callbacks(NULL);
69027+ rcu_process_callbacks();
69028 schedule_timeout_interruptible(1); /* Leave CPU for others. */
69029 }
69030
fe2de317 69031diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
5e856224 69032index a58ac28..196a3d8 100644
fe2de317
MT
69033--- a/kernel/rcutorture.c
69034+++ b/kernel/rcutorture.c
5e856224 69035@@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
8308f9c9
MT
69036 { 0 };
69037 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69038 { 0 };
69039-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69040-static atomic_t n_rcu_torture_alloc;
69041-static atomic_t n_rcu_torture_alloc_fail;
69042-static atomic_t n_rcu_torture_free;
69043-static atomic_t n_rcu_torture_mberror;
69044-static atomic_t n_rcu_torture_error;
69045+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69046+static atomic_unchecked_t n_rcu_torture_alloc;
69047+static atomic_unchecked_t n_rcu_torture_alloc_fail;
69048+static atomic_unchecked_t n_rcu_torture_free;
69049+static atomic_unchecked_t n_rcu_torture_mberror;
69050+static atomic_unchecked_t n_rcu_torture_error;
69051 static long n_rcu_torture_boost_ktrerror;
69052 static long n_rcu_torture_boost_rterror;
15a11c5b 69053 static long n_rcu_torture_boost_failure;
5e856224 69054@@ -243,11 +243,11 @@ rcu_torture_alloc(void)
8308f9c9
MT
69055
69056 spin_lock_bh(&rcu_torture_lock);
69057 if (list_empty(&rcu_torture_freelist)) {
69058- atomic_inc(&n_rcu_torture_alloc_fail);
69059+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69060 spin_unlock_bh(&rcu_torture_lock);
69061 return NULL;
69062 }
69063- atomic_inc(&n_rcu_torture_alloc);
69064+ atomic_inc_unchecked(&n_rcu_torture_alloc);
69065 p = rcu_torture_freelist.next;
69066 list_del_init(p);
69067 spin_unlock_bh(&rcu_torture_lock);
5e856224 69068@@ -260,7 +260,7 @@ rcu_torture_alloc(void)
8308f9c9
MT
69069 static void
69070 rcu_torture_free(struct rcu_torture *p)
69071 {
69072- atomic_inc(&n_rcu_torture_free);
69073+ atomic_inc_unchecked(&n_rcu_torture_free);
69074 spin_lock_bh(&rcu_torture_lock);
69075 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69076 spin_unlock_bh(&rcu_torture_lock);
5e856224 69077@@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
8308f9c9
MT
69078 i = rp->rtort_pipe_count;
69079 if (i > RCU_TORTURE_PIPE_LEN)
69080 i = RCU_TORTURE_PIPE_LEN;
69081- atomic_inc(&rcu_torture_wcount[i]);
69082+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69083 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69084 rp->rtort_mbtest = 0;
69085 rcu_torture_free(rp);
5e856224 69086@@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
8308f9c9
MT
69087 i = rp->rtort_pipe_count;
69088 if (i > RCU_TORTURE_PIPE_LEN)
69089 i = RCU_TORTURE_PIPE_LEN;
69090- atomic_inc(&rcu_torture_wcount[i]);
69091+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69092 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69093 rp->rtort_mbtest = 0;
69094 list_del(&rp->rtort_free);
5e856224 69095@@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
8308f9c9
MT
69096 i = old_rp->rtort_pipe_count;
69097 if (i > RCU_TORTURE_PIPE_LEN)
69098 i = RCU_TORTURE_PIPE_LEN;
69099- atomic_inc(&rcu_torture_wcount[i]);
69100+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69101 old_rp->rtort_pipe_count++;
69102 cur_ops->deferred_free(old_rp);
69103 }
5e856224 69104@@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
8308f9c9
MT
69105 return;
69106 }
69107 if (p->rtort_mbtest == 0)
69108- atomic_inc(&n_rcu_torture_mberror);
69109+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69110 spin_lock(&rand_lock);
69111 cur_ops->read_delay(&rand);
69112 n_rcu_torture_timers++;
5e856224 69113@@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
8308f9c9
MT
69114 continue;
69115 }
69116 if (p->rtort_mbtest == 0)
69117- atomic_inc(&n_rcu_torture_mberror);
69118+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69119 cur_ops->read_delay(&rand);
69120 preempt_disable();
69121 pipe_count = p->rtort_pipe_count;
5e856224 69122@@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
8308f9c9
MT
69123 rcu_torture_current,
69124 rcu_torture_current_version,
69125 list_empty(&rcu_torture_freelist),
69126- atomic_read(&n_rcu_torture_alloc),
69127- atomic_read(&n_rcu_torture_alloc_fail),
69128- atomic_read(&n_rcu_torture_free),
69129- atomic_read(&n_rcu_torture_mberror),
69130+ atomic_read_unchecked(&n_rcu_torture_alloc),
69131+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69132+ atomic_read_unchecked(&n_rcu_torture_free),
69133+ atomic_read_unchecked(&n_rcu_torture_mberror),
69134 n_rcu_torture_boost_ktrerror,
69135 n_rcu_torture_boost_rterror,
8308f9c9 69136 n_rcu_torture_boost_failure,
5e856224
MT
69137@@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
69138 n_online_attempts,
69139 n_offline_successes,
69140 n_offline_attempts);
8308f9c9
MT
69141- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
69142+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
69143 n_rcu_torture_boost_ktrerror != 0 ||
69144 n_rcu_torture_boost_rterror != 0 ||
15a11c5b 69145 n_rcu_torture_boost_failure != 0)
5e856224 69146@@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
69147 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69148 if (i > 1) {
69149 cnt += sprintf(&page[cnt], "!!! ");
69150- atomic_inc(&n_rcu_torture_error);
69151+ atomic_inc_unchecked(&n_rcu_torture_error);
69152 WARN_ON_ONCE(1);
69153 }
69154 cnt += sprintf(&page[cnt], "Reader Pipe: ");
5e856224 69155@@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
69156 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69157 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69158 cnt += sprintf(&page[cnt], " %d",
69159- atomic_read(&rcu_torture_wcount[i]));
69160+ atomic_read_unchecked(&rcu_torture_wcount[i]));
69161 }
69162 cnt += sprintf(&page[cnt], "\n");
69163 if (cur_ops->stats)
5e856224 69164@@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
8308f9c9
MT
69165
69166 if (cur_ops->cleanup)
69167 cur_ops->cleanup();
69168- if (atomic_read(&n_rcu_torture_error))
69169+ if (atomic_read_unchecked(&n_rcu_torture_error))
69170 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
69171 else
69172 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
5e856224 69173@@ -1664,17 +1664,17 @@ rcu_torture_init(void)
8308f9c9
MT
69174
69175 rcu_torture_current = NULL;
69176 rcu_torture_current_version = 0;
69177- atomic_set(&n_rcu_torture_alloc, 0);
69178- atomic_set(&n_rcu_torture_alloc_fail, 0);
69179- atomic_set(&n_rcu_torture_free, 0);
69180- atomic_set(&n_rcu_torture_mberror, 0);
69181- atomic_set(&n_rcu_torture_error, 0);
69182+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69183+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69184+ atomic_set_unchecked(&n_rcu_torture_free, 0);
69185+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69186+ atomic_set_unchecked(&n_rcu_torture_error, 0);
69187 n_rcu_torture_boost_ktrerror = 0;
69188 n_rcu_torture_boost_rterror = 0;
8308f9c9
MT
69189 n_rcu_torture_boost_failure = 0;
69190 n_rcu_torture_boosts = 0;
69191 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69192- atomic_set(&rcu_torture_wcount[i], 0);
69193+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69194 for_each_possible_cpu(cpu) {
69195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69196 per_cpu(rcu_torture_count, cpu)[i] = 0;
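[Editorial sketch, not part of the patch] The rcutorture and profile hunks above follow the PAX_REFCOUNT convention used throughout this patch: purely statistical counters that may legitimately wrap are switched from atomic_t to atomic_unchecked_t, so they are exempt from the reference-counter overflow trapping applied to ordinary atomic_t. The real definitions live in the per-architecture atomic headers elsewhere in the patch; the shape assumed here is only an illustration:

/* Sketch only: assumed shape of the unchecked atomic API (not the patch's
 * authoritative definition).  When PAX_REFCOUNT is disabled the unchecked
 * variants are expected to collapse onto the regular atomic_t operations. */
#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;

/* same semantics as atomic_read(), minus the overflow instrumentation */
static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile int *)&v->counter);
}
#else
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif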
fe2de317 69197diff --git a/kernel/rcutree.c b/kernel/rcutree.c
5e856224 69198index 6c4a672..70f3202 100644
fe2de317
MT
69199--- a/kernel/rcutree.c
69200+++ b/kernel/rcutree.c
5e856224
MT
69201@@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
69202 rcu_prepare_for_idle(smp_processor_id());
15a11c5b
MT
69203 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69204 smp_mb__before_atomic_inc(); /* See above. */
69205- atomic_inc(&rdtp->dynticks);
69206+ atomic_inc_unchecked(&rdtp->dynticks);
69207 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
69208- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69209+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
4c928ab7 69210 }
15a11c5b 69211
5e856224
MT
69212 /**
69213@@ -438,10 +438,10 @@ void rcu_irq_exit(void)
69214 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
69215 {
15a11c5b
MT
69216 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
69217- atomic_inc(&rdtp->dynticks);
69218+ atomic_inc_unchecked(&rdtp->dynticks);
69219 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69220 smp_mb__after_atomic_inc(); /* See above. */
69221- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69222+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
5e856224
MT
69223 rcu_cleanup_after_idle(smp_processor_id());
69224 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
69225 if (!is_idle_task(current)) {
69226@@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
15a11c5b
MT
69227 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
69228
69229 if (rdtp->dynticks_nmi_nesting == 0 &&
69230- (atomic_read(&rdtp->dynticks) & 0x1))
69231+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
69232 return;
69233 rdtp->dynticks_nmi_nesting++;
69234 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
69235- atomic_inc(&rdtp->dynticks);
69236+ atomic_inc_unchecked(&rdtp->dynticks);
69237 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69238 smp_mb__after_atomic_inc(); /* See above. */
69239- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69240+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69241 }
69242
69243 /**
5e856224 69244@@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
15a11c5b
MT
69245 return;
69246 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69247 smp_mb__before_atomic_inc(); /* See above. */
69248- atomic_inc(&rdtp->dynticks);
69249+ atomic_inc_unchecked(&rdtp->dynticks);
69250 smp_mb__after_atomic_inc(); /* Force delay to next write. */
69251- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69252+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69253 }
69254
5e856224
MT
69255 #ifdef CONFIG_PROVE_RCU
69256@@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
69257 int ret;
69258
69259 preempt_disable();
69260- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69261+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69262 preempt_enable();
69263 return ret;
69264 }
69265@@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
15a11c5b
MT
69266 */
69267 static int dyntick_save_progress_counter(struct rcu_data *rdp)
69268 {
69269- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
69270+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
5e856224 69271 return (rdp->dynticks_snap & 0x1) == 0;
15a11c5b
MT
69272 }
69273
5e856224 69274@@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
4c928ab7
MT
69275 unsigned int curr;
69276 unsigned int snap;
15a11c5b 69277
4c928ab7
MT
69278- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
69279+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69280 snap = (unsigned int)rdp->dynticks_snap;
15a11c5b
MT
69281
69282 /*
5e856224 69283@@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
58c5fc13 69284 /*
4c928ab7 69285 * Do RCU core processing for the current CPU.
58c5fc13 69286 */
ae4e228f
MT
69287-static void rcu_process_callbacks(struct softirq_action *unused)
69288+static void rcu_process_callbacks(void)
69289 {
4c928ab7 69290 trace_rcu_utilization("Start RCU core");
15a11c5b 69291 __rcu_process_callbacks(&rcu_sched_state,
5e856224
MT
69292@@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
69293 rdp->qlen = 0;
69294 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
69295 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
69296- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
69297+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
69298 rdp->cpu = cpu;
69299 rdp->rsp = rsp;
69300 raw_spin_unlock_irqrestore(&rnp->lock, flags);
69301@@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
69302 rdp->n_force_qs_snap = rsp->n_force_qs;
69303 rdp->blimit = blimit;
69304 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
69305- atomic_set(&rdp->dynticks->dynticks,
69306- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
69307+ atomic_set_unchecked(&rdp->dynticks->dynticks,
69308+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
69309 rcu_prepare_for_idle_init(cpu);
69310 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
69311
fe2de317 69312diff --git a/kernel/rcutree.h b/kernel/rcutree.h
5e856224 69313index fddff92..2c08359 100644
fe2de317
MT
69314--- a/kernel/rcutree.h
69315+++ b/kernel/rcutree.h
5e856224
MT
69316@@ -87,7 +87,7 @@ struct rcu_dynticks {
69317 long long dynticks_nesting; /* Track irq/process nesting level. */
69318 /* Process level is worth LLONG_MAX/2. */
69319 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69320- atomic_t dynticks; /* Even value for idle, else odd. */
69321+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
15a11c5b
MT
69322 };
69323
69324 /* RCU's kthread states for tracing. */
fe2de317 69325diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
5e856224 69326index 8bb35d7..6ea0a463 100644
fe2de317
MT
69327--- a/kernel/rcutree_plugin.h
69328+++ b/kernel/rcutree_plugin.h
5e856224 69329@@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
bc901d79
MT
69330
69331 /* Clean up and exit. */
69332 smp_mb(); /* ensure expedited GP seen before counter increment. */
69333- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69334+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69335 unlock_mb_ret:
69336 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69337 mb_ret:
5e856224 69338@@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
8308f9c9
MT
69339
69340 #else /* #ifndef CONFIG_SMP */
69341
69342-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69343-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69344+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69345+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69346
69347 static int synchronize_sched_expedited_cpu_stop(void *data)
69348 {
5e856224 69349@@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
69350 int firstsnap, s, snap, trycount = 0;
69351
69352 /* Note that atomic_inc_return() implies full memory barrier. */
69353- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69354+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69355 get_online_cpus();
69356
69357 /*
5e856224 69358@@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
69359 }
69360
69361 /* Check to see if someone else did our work for us. */
69362- s = atomic_read(&sync_sched_expedited_done);
69363+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69364 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69365 smp_mb(); /* ensure test happens before caller kfree */
69366 return;
5e856224 69367@@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
69368 * grace period works for us.
69369 */
69370 get_online_cpus();
5e856224
MT
69371- snap = atomic_read(&sync_sched_expedited_started);
69372+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
8308f9c9
MT
69373 smp_mb(); /* ensure read is before try_stop_cpus(). */
69374 }
69375
5e856224 69376@@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
69377 * than we did beat us to the punch.
69378 */
69379 do {
69380- s = atomic_read(&sync_sched_expedited_done);
69381+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69382 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69383 smp_mb(); /* ensure test happens before caller kfree */
69384 break;
69385 }
69386- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69387+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69388
69389 put_online_cpus();
69390 }
fe2de317 69391diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
5e856224 69392index 654cfe6..c0b28e2 100644
fe2de317
MT
69393--- a/kernel/rcutree_trace.c
69394+++ b/kernel/rcutree_trace.c
5e856224
MT
69395@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69396 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
fe2de317 69397 rdp->qs_pending);
5e856224 69398 seq_printf(m, " dt=%d/%llx/%d df=%lu",
fe2de317
MT
69399- atomic_read(&rdp->dynticks->dynticks),
69400+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69401 rdp->dynticks->dynticks_nesting,
69402 rdp->dynticks->dynticks_nmi_nesting,
69403 rdp->dynticks_fqs);
5e856224
MT
69404@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69405 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
fe2de317 69406 rdp->qs_pending);
5e856224 69407 seq_printf(m, ",%d,%llx,%d,%lu",
fe2de317
MT
69408- atomic_read(&rdp->dynticks->dynticks),
69409+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69410 rdp->dynticks->dynticks_nesting,
69411 rdp->dynticks->dynticks_nmi_nesting,
69412 rdp->dynticks_fqs);
fe2de317 69413diff --git a/kernel/resource.c b/kernel/resource.c
4c928ab7 69414index 7640b3a..5879283 100644
fe2de317
MT
69415--- a/kernel/resource.c
69416+++ b/kernel/resource.c
69417@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
58c5fc13
MT
69418
69419 static int __init ioresources_init(void)
69420 {
69421+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69422+#ifdef CONFIG_GRKERNSEC_PROC_USER
69423+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69424+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69425+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69426+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69427+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69428+#endif
69429+#else
69430 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69431 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69432+#endif
69433 return 0;
69434 }
69435 __initcall(ioresources_init);
fe2de317 69436diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
5e856224 69437index 98ec494..4241d6d 100644
fe2de317
MT
69438--- a/kernel/rtmutex-tester.c
69439+++ b/kernel/rtmutex-tester.c
66a7e928 69440@@ -20,7 +20,7 @@
8308f9c9
MT
69441 #define MAX_RT_TEST_MUTEXES 8
69442
69443 static spinlock_t rttest_lock;
69444-static atomic_t rttest_event;
69445+static atomic_unchecked_t rttest_event;
69446
69447 struct test_thread_data {
69448 int opcode;
fe2de317 69449@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
69450
69451 case RTTEST_LOCKCONT:
69452 td->mutexes[td->opdata] = 1;
69453- td->event = atomic_add_return(1, &rttest_event);
69454+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69455 return 0;
69456
69457 case RTTEST_RESET:
fe2de317 69458@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
69459 return 0;
69460
69461 case RTTEST_RESETEVENT:
69462- atomic_set(&rttest_event, 0);
69463+ atomic_set_unchecked(&rttest_event, 0);
69464 return 0;
69465
69466 default:
fe2de317 69467@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
69468 return ret;
69469
69470 td->mutexes[id] = 1;
69471- td->event = atomic_add_return(1, &rttest_event);
69472+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69473 rt_mutex_lock(&mutexes[id]);
69474- td->event = atomic_add_return(1, &rttest_event);
69475+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69476 td->mutexes[id] = 4;
69477 return 0;
69478
fe2de317 69479@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
69480 return ret;
69481
69482 td->mutexes[id] = 1;
69483- td->event = atomic_add_return(1, &rttest_event);
69484+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69485 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69486- td->event = atomic_add_return(1, &rttest_event);
69487+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69488 td->mutexes[id] = ret ? 0 : 4;
69489 return ret ? -EINTR : 0;
69490
fe2de317 69491@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
69492 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69493 return ret;
69494
69495- td->event = atomic_add_return(1, &rttest_event);
69496+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69497 rt_mutex_unlock(&mutexes[id]);
69498- td->event = atomic_add_return(1, &rttest_event);
69499+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69500 td->mutexes[id] = 0;
69501 return 0;
69502
fe2de317 69503@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
69504 break;
69505
69506 td->mutexes[dat] = 2;
69507- td->event = atomic_add_return(1, &rttest_event);
69508+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69509 break;
69510
66a7e928 69511 default:
fe2de317 69512@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
69513 return;
69514
69515 td->mutexes[dat] = 3;
69516- td->event = atomic_add_return(1, &rttest_event);
69517+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69518 break;
69519
69520 case RTTEST_LOCKNOWAIT:
fe2de317 69521@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
69522 return;
69523
69524 td->mutexes[dat] = 1;
69525- td->event = atomic_add_return(1, &rttest_event);
69526+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69527 return;
69528
66a7e928 69529 default:
5e856224
MT
69530diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
69531index e8a1f83..363d17d 100644
69532--- a/kernel/sched/auto_group.c
69533+++ b/kernel/sched/auto_group.c
69534@@ -11,7 +11,7 @@
4c928ab7 69535
5e856224
MT
69536 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69537 static struct autogroup autogroup_default;
69538-static atomic_t autogroup_seq_nr;
69539+static atomic_unchecked_t autogroup_seq_nr;
66a7e928 69540
5e856224
MT
69541 void __init autogroup_init(struct task_struct *init_task)
69542 {
69543@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
4c928ab7 69544
5e856224
MT
69545 kref_init(&ag->kref);
69546 init_rwsem(&ag->lock);
69547- ag->id = atomic_inc_return(&autogroup_seq_nr);
69548+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69549 ag->tg = tg;
69550 #ifdef CONFIG_RT_GROUP_SCHED
69551 /*
69552diff --git a/kernel/sched/core.c b/kernel/sched/core.c
69553index 478a04c..e16339a 100644
69554--- a/kernel/sched/core.c
69555+++ b/kernel/sched/core.c
69556@@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice)
58c5fc13
MT
69557 /* convert nice value [19,-20] to rlimit style value [1,40] */
69558 int nice_rlim = 20 - nice;
69559
69560+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69561+
df50ba0c 69562 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
58c5fc13
MT
69563 capable(CAP_SYS_NICE));
69564 }
5e856224 69565@@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment)
58c5fc13
MT
69566 if (nice > 19)
69567 nice = 19;
69568
69569- if (increment < 0 && !can_nice(current, nice))
69570+ if (increment < 0 && (!can_nice(current, nice) ||
69571+ gr_handle_chroot_nice()))
69572 return -EPERM;
69573
69574 retval = security_task_setnice(current, nice);
5e856224 69575@@ -4041,6 +4044,7 @@ recheck:
6892158b
MT
69576 unsigned long rlim_rtprio =
69577 task_rlimit(p, RLIMIT_RTPRIO);
58c5fc13 69578
6892158b 69579+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
df50ba0c
MT
69580 /* can't set/change the rt policy */
69581 if (policy != p->policy && !rlim_rtprio)
69582 return -EPERM;
5e856224
MT
69583diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
69584index aca16b8..8e3acc4 100644
69585--- a/kernel/sched/fair.c
69586+++ b/kernel/sched/fair.c
69587@@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6892158b
MT
69588 * run_rebalance_domains is triggered when needed from the scheduler tick.
69589 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
df50ba0c
MT
69590 */
69591-static void run_rebalance_domains(struct softirq_action *h)
69592+static void run_rebalance_domains(void)
69593 {
69594 int this_cpu = smp_processor_id();
69595 struct rq *this_rq = cpu_rq(this_cpu);
fe2de317 69596diff --git a/kernel/signal.c b/kernel/signal.c
5e856224 69597index b09cf3b..b291c66 100644
fe2de317
MT
69598--- a/kernel/signal.c
69599+++ b/kernel/signal.c
5e856224 69600@@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
df50ba0c
MT
69601
69602 int print_fatal_signals __read_mostly;
69603
69604-static void __user *sig_handler(struct task_struct *t, int sig)
69605+static __sighandler_t sig_handler(struct task_struct *t, int sig)
69606 {
69607 return t->sighand->action[sig - 1].sa.sa_handler;
69608 }
69609
69610-static int sig_handler_ignored(void __user *handler, int sig)
69611+static int sig_handler_ignored(__sighandler_t handler, int sig)
69612 {
69613 /* Is it explicitly or implicitly ignored? */
69614 return handler == SIG_IGN ||
5e856224 69615@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
df50ba0c
MT
69616 static int sig_task_ignored(struct task_struct *t, int sig,
69617 int from_ancestor_ns)
69618 {
69619- void __user *handler;
69620+ __sighandler_t handler;
69621
69622 handler = sig_handler(t, sig);
69623
5e856224 69624@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
58c5fc13 69625 atomic_inc(&user->sigpending);
ae4e228f
MT
69626 rcu_read_unlock();
69627
58c5fc13
MT
69628+ if (!override_rlimit)
69629+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
ae4e228f 69630+
58c5fc13
MT
69631 if (override_rlimit ||
69632 atomic_read(&user->sigpending) <=
df50ba0c 69633 task_rlimit(t, RLIMIT_SIGPENDING)) {
5e856224 69634@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
df50ba0c
MT
69635
69636 int unhandled_signal(struct task_struct *tsk, int sig)
69637 {
69638- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69639+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69640 if (is_global_init(tsk))
69641 return 1;
69642 if (handler != SIG_IGN && handler != SIG_DFL)
5e856224 69643@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
58c5fc13
MT
69644 }
69645 }
69646
15a11c5b
MT
69647+ /* allow glibc communication via tgkill to other threads in our
69648+ thread group */
69649+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69650+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69651+ && gr_handle_signal(t, sig))
58c5fc13
MT
69652+ return -EPERM;
69653+
69654 return security_task_kill(t, info, sig, 0);
69655 }
69656
5e856224 69657@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
58c5fc13
MT
69658 return send_signal(sig, info, p, 1);
69659 }
69660
69661-static int
69662+int
69663 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69664 {
69665 return send_signal(sig, info, t, 0);
5e856224 69666@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
69667 unsigned long int flags;
69668 int ret, blocked, ignored;
69669 struct k_sigaction *action;
69670+ int is_unhandled = 0;
69671
69672 spin_lock_irqsave(&t->sighand->siglock, flags);
69673 action = &t->sighand->action[sig-1];
5e856224 69674@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
69675 }
69676 if (action->sa.sa_handler == SIG_DFL)
69677 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69678+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69679+ is_unhandled = 1;
58c5fc13
MT
69680 ret = specific_send_sig_info(sig, info, t);
69681 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69682
c52201e0
MT
69683+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
69684+ normal operation */
69685+ if (is_unhandled) {
69686+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69687+ gr_handle_crash(t, sig);
69688+ }
58c5fc13
MT
69689+
69690 return ret;
69691 }
69692
5e856224 69693@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
57199397
MT
69694 ret = check_kill_permission(sig, info, p);
69695 rcu_read_unlock();
ae4e228f
MT
69696
69697- if (!ret && sig)
69698+ if (!ret && sig) {
69699 ret = do_send_sig_info(sig, info, p, true);
58c5fc13
MT
69700+ if (!ret)
69701+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
ae4e228f 69702+ }
58c5fc13
MT
69703
69704 return ret;
ae4e228f 69705 }
5e856224 69706@@ -2829,7 +2852,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
15a11c5b
MT
69707 int error = -ESRCH;
69708
69709 rcu_read_lock();
69710- p = find_task_by_vpid(pid);
69711+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69712+ /* allow glibc communication via tgkill to other threads in our
69713+ thread group */
69714+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69715+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
69716+ p = find_task_by_vpid_unrestricted(pid);
69717+ else
69718+#endif
69719+ p = find_task_by_vpid(pid);
69720 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69721 error = check_kill_permission(sig, info, p);
69722 /*
fe2de317 69723diff --git a/kernel/smp.c b/kernel/smp.c
4c928ab7 69724index db197d6..17aef0b 100644
fe2de317
MT
69725--- a/kernel/smp.c
69726+++ b/kernel/smp.c
69727@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
ae4e228f
MT
69728 }
69729 EXPORT_SYMBOL(smp_call_function);
69730
69731-void ipi_call_lock(void)
69732+void ipi_call_lock(void) __acquires(call_function.lock)
69733 {
69734 raw_spin_lock(&call_function.lock);
69735 }
69736
69737-void ipi_call_unlock(void)
69738+void ipi_call_unlock(void) __releases(call_function.lock)
69739 {
69740 raw_spin_unlock(&call_function.lock);
69741 }
69742
69743-void ipi_call_lock_irq(void)
69744+void ipi_call_lock_irq(void) __acquires(call_function.lock)
69745 {
69746 raw_spin_lock_irq(&call_function.lock);
69747 }
69748
69749-void ipi_call_unlock_irq(void)
69750+void ipi_call_unlock_irq(void) __releases(call_function.lock)
69751 {
69752 raw_spin_unlock_irq(&call_function.lock);
69753 }
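[Editorial sketch, not part of the patch] The ipi_call_lock()/ipi_call_unlock() changes above add only sparse lock-context annotations; runtime behaviour is unchanged. A minimal illustration of the idiom, using a hypothetical my_example_lock rather than call_function.lock:

/* Sketch only: __acquires()/__releases() tell sparse which lock a helper
 * takes or drops, so unbalanced lock/unlock pairs are flagged at build time. */
static DEFINE_SPINLOCK(my_example_lock);

static void my_example_lock_acquire(void) __acquires(my_example_lock)
{
	spin_lock(&my_example_lock);
}

static void my_example_lock_release(void) __releases(my_example_lock)
{
	spin_unlock(&my_example_lock);
}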
fe2de317 69754diff --git a/kernel/softirq.c b/kernel/softirq.c
5e856224 69755index 4eb3a0f..6f1fa81 100644
fe2de317
MT
69756--- a/kernel/softirq.c
69757+++ b/kernel/softirq.c
69758@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
ae4e228f 69759
66a7e928 69760 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
ae4e228f
MT
69761
69762-char *softirq_to_name[NR_SOFTIRQS] = {
69763+const char * const softirq_to_name[NR_SOFTIRQS] = {
69764 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
15a11c5b 69765 "TASKLET", "SCHED", "HRTIMER", "RCU"
ae4e228f 69766 };
bc901d79
MT
69767@@ -235,7 +235,7 @@ restart:
69768 kstat_incr_softirqs_this_cpu(vec_nr);
ae4e228f 69769
bc901d79 69770 trace_softirq_entry(vec_nr);
ae4e228f
MT
69771- h->action(h);
69772+ h->action();
bc901d79 69773 trace_softirq_exit(vec_nr);
ae4e228f 69774 if (unlikely(prev_count != preempt_count())) {
bc901d79 69775 printk(KERN_ERR "huh, entered softirq %u %s %p"
15a11c5b 69776@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
ae4e228f
MT
69777 local_irq_restore(flags);
69778 }
69779
69780-void open_softirq(int nr, void (*action)(struct softirq_action *))
69781+void open_softirq(int nr, void (*action)(void))
69782 {
15a11c5b
MT
69783- softirq_vec[nr].action = action;
69784+ pax_open_kernel();
69785+ *(void **)&softirq_vec[nr].action = action;
69786+ pax_close_kernel();
ae4e228f 69787 }
15a11c5b
MT
69788
69789 /*
fe2de317 69790@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
ae4e228f
MT
69791
69792 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
69793
69794-static void tasklet_action(struct softirq_action *a)
69795+static void tasklet_action(void)
69796 {
69797 struct tasklet_struct *list;
69798
fe2de317 69799@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
ae4e228f
MT
69800 }
69801 }
69802
69803-static void tasklet_hi_action(struct softirq_action *a)
69804+static void tasklet_hi_action(void)
69805 {
69806 struct tasklet_struct *list;
69807
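[Editorial sketch, not part of the patch] The softirq.c hunks above drop the unused struct softirq_action argument from every handler and make open_softirq() write the handler pointer through pax_open_kernel()/pax_close_kernel(), since softirq_vec is kept read-only at runtime. A minimal usage sketch against the changed prototype; MY_EXAMPLE_SOFTIRQ and the handler are hypothetical names used purely for illustration:

/* Sketch only: registering a handler with the argument-less prototype. */
static void my_example_softirq_handler(void)
{
	/* bottom-half work goes here; no softirq_action argument any more */
}

static int __init my_example_softirq_init(void)
{
	open_softirq(MY_EXAMPLE_SOFTIRQ, my_example_softirq_handler);
	return 0;
}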
fe2de317 69808diff --git a/kernel/sys.c b/kernel/sys.c
5e856224 69809index 888d227..f04b318 100644
fe2de317
MT
69810--- a/kernel/sys.c
69811+++ b/kernel/sys.c
4c928ab7 69812@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
58c5fc13
MT
69813 error = -EACCES;
69814 goto out;
69815 }
69816+
69817+ if (gr_handle_chroot_setpriority(p, niceval)) {
69818+ error = -EACCES;
69819+ goto out;
69820+ }
69821+
69822 no_nice = security_task_setnice(p, niceval);
69823 if (no_nice) {
69824 error = no_nice;
4c928ab7 69825@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
58c5fc13
MT
69826 goto error;
69827 }
69828
69829+ if (gr_check_group_change(new->gid, new->egid, -1))
69830+ goto error;
69831+
69832 if (rgid != (gid_t) -1 ||
69833 (egid != (gid_t) -1 && egid != old->gid))
69834 new->sgid = new->egid;
4c928ab7 69835@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
57199397 69836 old = current_cred();
58c5fc13
MT
69837
69838 retval = -EPERM;
69839+
69840+ if (gr_check_group_change(gid, gid, gid))
69841+ goto error;
69842+
66a7e928 69843 if (nsown_capable(CAP_SETGID))
58c5fc13
MT
69844 new->gid = new->egid = new->sgid = new->fsgid = gid;
69845 else if (gid == old->gid || gid == old->sgid)
4c928ab7
MT
69846@@ -618,7 +631,7 @@ error:
69847 /*
69848 * change the user struct in a credentials set to match the new UID
69849 */
69850-static int set_user(struct cred *new)
69851+int set_user(struct cred *new)
69852 {
69853 struct user_struct *new_user;
69854
69855@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
58c5fc13
MT
69856 goto error;
69857 }
69858
69859+ if (gr_check_user_change(new->uid, new->euid, -1))
69860+ goto error;
69861+
69862 if (new->uid != old->uid) {
69863 retval = set_user(new);
69864 if (retval < 0)
4c928ab7 69865@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
57199397 69866 old = current_cred();
58c5fc13
MT
69867
69868 retval = -EPERM;
69869+
69870+ if (gr_check_crash_uid(uid))
69871+ goto error;
69872+ if (gr_check_user_change(uid, uid, uid))
69873+ goto error;
69874+
66a7e928 69875 if (nsown_capable(CAP_SETUID)) {
58c5fc13
MT
69876 new->suid = new->uid = uid;
69877 if (uid != old->uid) {
4c928ab7 69878@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
58c5fc13
MT
69879 goto error;
69880 }
69881
69882+ if (gr_check_user_change(ruid, euid, -1))
69883+ goto error;
69884+
69885 if (ruid != (uid_t) -1) {
69886 new->uid = ruid;
69887 if (ruid != old->uid) {
4c928ab7 69888@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
58c5fc13
MT
69889 goto error;
69890 }
69891
69892+ if (gr_check_group_change(rgid, egid, -1))
69893+ goto error;
69894+
69895 if (rgid != (gid_t) -1)
69896 new->gid = rgid;
69897 if (egid != (gid_t) -1)
4c928ab7 69898@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
69899 old = current_cred();
69900 old_fsuid = old->fsuid;
58c5fc13
MT
69901
69902+ if (gr_check_user_change(-1, -1, uid))
69903+ goto error;
69904+
69905 if (uid == old->uid || uid == old->euid ||
69906 uid == old->suid || uid == old->fsuid ||
66a7e928 69907 nsown_capable(CAP_SETUID)) {
4c928ab7 69908@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
69909 }
69910 }
69911
69912+error:
69913 abort_creds(new);
69914 return old_fsuid;
69915
4c928ab7 69916@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
58c5fc13
MT
69917 if (gid == old->gid || gid == old->egid ||
69918 gid == old->sgid || gid == old->fsgid ||
66a7e928 69919 nsown_capable(CAP_SETGID)) {
58c5fc13
MT
69920+ if (gr_check_group_change(-1, -1, gid))
69921+ goto error;
69922+
69923 if (gid != old_fsgid) {
69924 new->fsgid = gid;
69925 goto change_okay;
57199397
MT
69926 }
69927 }
69928
69929+error:
69930 abort_creds(new);
69931 return old_fsgid;
69932
4c928ab7 69933@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
fe2de317
MT
69934 }
69935 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69936 snprintf(buf, len, "2.6.%u%s", v, rest);
69937- ret = copy_to_user(release, buf, len);
69938+ if (len > sizeof(buf))
69939+ ret = -EFAULT;
69940+ else
69941+ ret = copy_to_user(release, buf, len);
69942 }
69943 return ret;
69944 }
4c928ab7 69945@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
6e9df6a3
MT
69946 return -EFAULT;
69947
69948 down_read(&uts_sem);
69949- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69950+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69951 __OLD_UTS_LEN);
69952 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69953- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69954+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69955 __OLD_UTS_LEN);
69956 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69957- error |= __copy_to_user(&name->release, &utsname()->release,
69958+ error |= __copy_to_user(name->release, &utsname()->release,
69959 __OLD_UTS_LEN);
69960 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69961- error |= __copy_to_user(&name->version, &utsname()->version,
69962+ error |= __copy_to_user(name->version, &utsname()->version,
69963 __OLD_UTS_LEN);
69964 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69965- error |= __copy_to_user(&name->machine, &utsname()->machine,
69966+ error |= __copy_to_user(name->machine, &utsname()->machine,
69967 __OLD_UTS_LEN);
69968 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69969 up_read(&uts_sem);
5e856224 69970@@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
58c5fc13
MT
69971 error = get_dumpable(me->mm);
69972 break;
69973 case PR_SET_DUMPABLE:
69974- if (arg2 < 0 || arg2 > 1) {
69975+ if (arg2 > 1) {
69976 error = -EINVAL;
69977 break;
69978 }
fe2de317 69979diff --git a/kernel/sysctl.c b/kernel/sysctl.c
5e856224 69980index f03a6ef..5fcc8af 100644
fe2de317
MT
69981--- a/kernel/sysctl.c
69982+++ b/kernel/sysctl.c
4c928ab7 69983@@ -86,6 +86,13 @@
ae4e228f 69984
58c5fc13
MT
69985
69986 #if defined(CONFIG_SYSCTL)
69987+#include <linux/grsecurity.h>
69988+#include <linux/grinternal.h>
69989+
69990+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69991+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69992+ const int op);
69993+extern int gr_handle_chroot_sysctl(const int op);
69994
69995 /* External variables not in a header file. */
df50ba0c 69996 extern int sysctl_overcommit_memory;
4c928ab7 69997@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
57199397
MT
69998 }
69999
58c5fc13 70000 #endif
57199397 70001+extern struct ctl_table grsecurity_table[];
58c5fc13
MT
70002
70003 static struct ctl_table root_table[];
70004 static struct ctl_table_root sysctl_table_root;
4c928ab7 70005@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
58c5fc13
MT
70006 int sysctl_legacy_va_layout;
70007 #endif
70008
70009+#ifdef CONFIG_PAX_SOFTMODE
70010+static ctl_table pax_table[] = {
70011+ {
58c5fc13
MT
70012+ .procname = "softmode",
70013+ .data = &pax_softmode,
70014+ .maxlen = sizeof(unsigned int),
70015+ .mode = 0600,
70016+ .proc_handler = &proc_dointvec,
70017+ },
70018+
ae4e228f 70019+ { }
58c5fc13
MT
70020+};
70021+#endif
70022+
df50ba0c 70023 /* The default sysctl tables: */
58c5fc13 70024
df50ba0c 70025 static struct ctl_table root_table[] = {
4c928ab7 70026@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
58c5fc13
MT
70027 #endif
70028
70029 static struct ctl_table kern_table[] = {
ae4e228f 70030+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58c5fc13 70031+ {
58c5fc13
MT
70032+ .procname = "grsecurity",
70033+ .mode = 0500,
70034+ .child = grsecurity_table,
70035+ },
70036+#endif
70037+
70038+#ifdef CONFIG_PAX_SOFTMODE
70039+ {
58c5fc13
MT
70040+ .procname = "pax",
70041+ .mode = 0500,
70042+ .child = pax_table,
70043+ },
70044+#endif
70045+
58c5fc13 70046 {
ae4e228f
MT
70047 .procname = "sched_child_runs_first",
70048 .data = &sysctl_sched_child_runs_first,
4c928ab7 70049@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
bc901d79
MT
70050 .data = &modprobe_path,
70051 .maxlen = KMOD_PATH_LEN,
70052 .mode = 0644,
70053- .proc_handler = proc_dostring,
70054+ .proc_handler = proc_dostring_modpriv,
70055 },
70056 {
70057 .procname = "modules_disabled",
4c928ab7 70058@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
16454cff
MT
70059 .extra1 = &zero,
70060 .extra2 = &one,
70061 },
70062+#endif
70063 {
70064 .procname = "kptr_restrict",
70065 .data = &kptr_restrict,
70066 .maxlen = sizeof(int),
70067 .mode = 0644,
5e856224 70068 .proc_handler = proc_dointvec_minmax_sysadmin,
16454cff
MT
70069+#ifdef CONFIG_GRKERNSEC_HIDESYM
70070+ .extra1 = &two,
70071+#else
70072 .extra1 = &zero,
70073+#endif
70074 .extra2 = &two,
70075 },
70076-#endif
70077 {
70078 .procname = "ngroups_max",
70079 .data = &ngroups_max,
5e856224 70080@@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
57199397
MT
70081 .proc_handler = proc_dointvec_minmax,
70082 .extra1 = &zero,
70083 },
70084+ {
70085+ .procname = "heap_stack_gap",
70086+ .data = &sysctl_heap_stack_gap,
70087+ .maxlen = sizeof(sysctl_heap_stack_gap),
70088+ .mode = 0644,
70089+ .proc_handler = proc_doulongvec_minmax,
70090+ },
70091 #else
70092 {
70093 .procname = "nr_trim_pages",
5e856224 70094@@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
66a7e928
MT
70095 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
70096 {
58c5fc13 70097 int mode;
66a7e928
MT
70098+ int error;
70099+
58c5fc13
MT
70100+ if (table->parent != NULL && table->parent->procname != NULL &&
70101+ table->procname != NULL &&
70102+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
70103+ return -EACCES;
70104+ if (gr_handle_chroot_sysctl(op))
70105+ return -EACCES;
70106+ error = gr_handle_sysctl(table, op);
70107+ if (error)
70108+ return error;
66a7e928
MT
70109
70110 if (root->permissions)
70111 mode = root->permissions(root, current->nsproxy, table);
5e856224 70112@@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
70113 buffer, lenp, ppos);
70114 }
70115
70116+int proc_dostring_modpriv(struct ctl_table *table, int write,
70117+ void __user *buffer, size_t *lenp, loff_t *ppos)
70118+{
70119+ if (write && !capable(CAP_SYS_MODULE))
70120+ return -EPERM;
70121+
70122+ return _proc_do_string(table->data, table->maxlen, write,
70123+ buffer, lenp, ppos);
70124+}
70125+
70126 static size_t proc_skip_spaces(char **buf)
70127 {
70128 size_t ret;
5e856224 70129@@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
57199397
MT
70130 len = strlen(tmp);
70131 if (len > *size)
70132 len = *size;
70133+ if (len > sizeof(tmp))
70134+ len = sizeof(tmp);
70135 if (copy_to_user(*buf, tmp, len))
70136 return -EFAULT;
70137 *size -= len;
5e856224 70138@@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
6892158b
MT
70139 *i = val;
70140 } else {
70141 val = convdiv * (*i) / convmul;
70142- if (!first)
70143+ if (!first) {
70144 err = proc_put_char(&buffer, &left, '\t');
70145+ if (err)
70146+ break;
70147+ }
70148 err = proc_put_long(&buffer, &left, val, false);
70149 if (err)
70150 break;
5e856224 70151@@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
70152 return -ENOSYS;
70153 }
70154
70155+int proc_dostring_modpriv(struct ctl_table *table, int write,
70156+ void __user *buffer, size_t *lenp, loff_t *ppos)
70157+{
70158+ return -ENOSYS;
70159+}
70160+
70161 int proc_dointvec(struct ctl_table *table, int write,
70162 void __user *buffer, size_t *lenp, loff_t *ppos)
70163 {
5e856224 70164@@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
bc901d79
MT
70165 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70166 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70167 EXPORT_SYMBOL(proc_dostring);
70168+EXPORT_SYMBOL(proc_dostring_modpriv);
70169 EXPORT_SYMBOL(proc_doulongvec_minmax);
70170 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70171 EXPORT_SYMBOL(register_sysctl_table);
fe2de317 70172diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
4c928ab7 70173index a650694..aaeeb20 100644
fe2de317
MT
70174--- a/kernel/sysctl_binary.c
70175+++ b/kernel/sysctl_binary.c
70176@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
70177 int i;
70178
70179 set_fs(KERNEL_DS);
70180- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70181+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70182 set_fs(old_fs);
70183 if (result < 0)
70184 goto out_kfree;
70185@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
70186 }
70187
70188 set_fs(KERNEL_DS);
70189- result = vfs_write(file, buffer, str - buffer, &pos);
70190+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70191 set_fs(old_fs);
70192 if (result < 0)
70193 goto out_kfree;
70194@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
70195 int i;
70196
70197 set_fs(KERNEL_DS);
70198- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70199+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70200 set_fs(old_fs);
70201 if (result < 0)
70202 goto out_kfree;
70203@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
70204 }
70205
70206 set_fs(KERNEL_DS);
70207- result = vfs_write(file, buffer, str - buffer, &pos);
70208+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70209 set_fs(old_fs);
70210 if (result < 0)
70211 goto out_kfree;
70212@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
70213 int i;
70214
70215 set_fs(KERNEL_DS);
70216- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70217+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70218 set_fs(old_fs);
70219 if (result < 0)
70220 goto out;
70221@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70222 __le16 dnaddr;
70223
70224 set_fs(KERNEL_DS);
70225- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70226+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70227 set_fs(old_fs);
70228 if (result < 0)
70229 goto out;
70230@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70231 le16_to_cpu(dnaddr) & 0x3ff);
70232
70233 set_fs(KERNEL_DS);
70234- result = vfs_write(file, buf, len, &pos);
70235+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70236 set_fs(old_fs);
70237 if (result < 0)
70238 goto out;
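[Editorial sketch, not part of the patch] The vfs_read()/vfs_write() call sites above run on kernel buffers under set_fs(KERNEL_DS), so the patch casts them with __force_user to satisfy the stricter sparse address-space checking introduced elsewhere in this patch. The exact definition is an assumption here; it is expected to reduce to something like:

/* Sketch only: assumed shape of the address-space cast shorthands. */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif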
70239diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
70240index 362da65..ab8ef8c 100644
70241--- a/kernel/sysctl_check.c
70242+++ b/kernel/sysctl_check.c
70243@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
bc901d79
MT
70244 set_fail(&fail, table, "Directory with extra2");
70245 } else {
70246 if ((table->proc_handler == proc_dostring) ||
70247+ (table->proc_handler == proc_dostring_modpriv) ||
70248 (table->proc_handler == proc_dointvec) ||
70249 (table->proc_handler == proc_dointvec_minmax) ||
70250 (table->proc_handler == proc_dointvec_jiffies) ||
fe2de317
MT
70251diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70252index e660464..c8b9e67 100644
70253--- a/kernel/taskstats.c
70254+++ b/kernel/taskstats.c
df50ba0c 70255@@ -27,9 +27,12 @@
58c5fc13
MT
70256 #include <linux/cgroup.h>
70257 #include <linux/fs.h>
70258 #include <linux/file.h>
70259+#include <linux/grsecurity.h>
70260 #include <net/genetlink.h>
6e9df6a3 70261 #include <linux/atomic.h>
58c5fc13
MT
70262
70263+extern int gr_is_taskstats_denied(int pid);
70264+
70265 /*
70266 * Maximum length of a cpumask that can be specified in
70267 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
6e9df6a3 70268@@ -556,6 +559,9 @@ err:
58c5fc13 70269
bc901d79
MT
70270 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70271 {
58c5fc13
MT
70272+ if (gr_is_taskstats_denied(current->pid))
70273+ return -EACCES;
70274+
bc901d79
MT
70275 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70276 return cmd_attr_register_cpumask(info);
70277 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
fe2de317 70278diff --git a/kernel/time.c b/kernel/time.c
4c928ab7 70279index 73e416d..cfc6f69 100644
fe2de317
MT
70280--- a/kernel/time.c
70281+++ b/kernel/time.c
70282@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70283 return error;
70284
70285 if (tz) {
70286+ /* we log in do_settimeofday called below, so don't log twice
70287+ */
70288+ if (!tv)
70289+ gr_log_timechange();
70290+
70291 /* SMP safe, global irq locking makes it work. */
70292 sys_tz = *tz;
70293 update_vsyscall_tz();
70294diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
4c928ab7 70295index 8a46f5d..bbe6f9c 100644
fe2de317
MT
70296--- a/kernel/time/alarmtimer.c
70297+++ b/kernel/time/alarmtimer.c
4c928ab7
MT
70298@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
70299 struct platform_device *pdev;
15a11c5b
MT
70300 int error = 0;
70301 int i;
70302- struct k_clock alarm_clock = {
70303+ static struct k_clock alarm_clock = {
70304 .clock_getres = alarm_clock_getres,
70305 .clock_get = alarm_clock_get,
70306 .timer_create = alarm_timer_create,
fe2de317 70307diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
4c928ab7 70308index fd4a7b1..fae5c2a 100644
fe2de317
MT
70309--- a/kernel/time/tick-broadcast.c
70310+++ b/kernel/time/tick-broadcast.c
70311@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
58c5fc13
MT
70312 * then clear the broadcast bit.
70313 */
70314 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70315- int cpu = smp_processor_id();
70316+ cpu = smp_processor_id();
70317
70318 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70319 tick_broadcast_clear_oneshot(cpu);
fe2de317 70320diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
5e856224 70321index 0c63581..e25dcb6 100644
fe2de317
MT
70322--- a/kernel/time/timekeeping.c
70323+++ b/kernel/time/timekeeping.c
bc901d79
MT
70324@@ -14,6 +14,7 @@
70325 #include <linux/init.h>
70326 #include <linux/mm.h>
70327 #include <linux/sched.h>
70328+#include <linux/grsecurity.h>
66a7e928 70329 #include <linux/syscore_ops.h>
bc901d79
MT
70330 #include <linux/clocksource.h>
70331 #include <linux/jiffies.h>
fe2de317 70332@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
bc901d79
MT
70333 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70334 return -EINVAL;
70335
70336+ gr_log_timechange();
70337+
70338 write_seqlock_irqsave(&xtime_lock, flags);
70339
70340 timekeeping_forward_now();
fe2de317
MT
70341diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70342index 3258455..f35227d 100644
70343--- a/kernel/time/timer_list.c
70344+++ b/kernel/time/timer_list.c
70345@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
57199397
MT
70346
70347 static void print_name_offset(struct seq_file *m, void *sym)
70348 {
70349+#ifdef CONFIG_GRKERNSEC_HIDESYM
70350+ SEQ_printf(m, "<%p>", NULL);
70351+#else
70352 char symname[KSYM_NAME_LEN];
70353
70354 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
16454cff 70355 SEQ_printf(m, "<%pK>", sym);
57199397
MT
70356 else
70357 SEQ_printf(m, "%s", symname);
70358+#endif
70359 }
70360
70361 static void
70362@@ -112,7 +116,11 @@ next_one:
70363 static void
70364 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70365 {
70366+#ifdef CONFIG_GRKERNSEC_HIDESYM
70367+ SEQ_printf(m, " .base: %p\n", NULL);
70368+#else
16454cff 70369 SEQ_printf(m, " .base: %pK\n", base);
57199397
MT
70370+#endif
70371 SEQ_printf(m, " .index: %d\n",
70372 base->index);
70373 SEQ_printf(m, " .resolution: %Lu nsecs\n",
fe2de317 70374@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
57199397
MT
70375 {
70376 struct proc_dir_entry *pe;
70377
70378+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70379+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70380+#else
70381 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70382+#endif
70383 if (!pe)
70384 return -ENOMEM;
70385 return 0;
fe2de317 70386diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
4c928ab7 70387index 0b537f2..9e71eca 100644
fe2de317
MT
70388--- a/kernel/time/timer_stats.c
70389+++ b/kernel/time/timer_stats.c
8308f9c9
MT
70390@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70391 static unsigned long nr_entries;
70392 static struct entry entries[MAX_ENTRIES];
70393
70394-static atomic_t overflow_count;
70395+static atomic_unchecked_t overflow_count;
70396
70397 /*
70398 * The entries are in a hash-table, for fast lookup:
70399@@ -140,7 +140,7 @@ static void reset_entries(void)
70400 nr_entries = 0;
70401 memset(entries, 0, sizeof(entries));
70402 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70403- atomic_set(&overflow_count, 0);
70404+ atomic_set_unchecked(&overflow_count, 0);
70405 }
70406
70407 static struct entry *alloc_entry(void)
fe2de317 70408@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
8308f9c9
MT
70409 if (likely(entry))
70410 entry->count++;
70411 else
70412- atomic_inc(&overflow_count);
70413+ atomic_inc_unchecked(&overflow_count);
70414
70415 out_unlock:
70416 raw_spin_unlock_irqrestore(lock, flags);
fe2de317 70417@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
57199397
MT
70418
70419 static void print_name_offset(struct seq_file *m, unsigned long addr)
70420 {
70421+#ifdef CONFIG_GRKERNSEC_HIDESYM
70422+ seq_printf(m, "<%p>", NULL);
70423+#else
70424 char symname[KSYM_NAME_LEN];
70425
70426 if (lookup_symbol_name(addr, symname) < 0)
70427 seq_printf(m, "<%p>", (void *)addr);
70428 else
70429 seq_printf(m, "%s", symname);
70430+#endif
70431 }
70432
70433 static int tstats_show(struct seq_file *m, void *v)
fe2de317 70434@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
8308f9c9
MT
70435
70436 seq_puts(m, "Timer Stats Version: v0.2\n");
70437 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70438- if (atomic_read(&overflow_count))
70439+ if (atomic_read_unchecked(&overflow_count))
70440 seq_printf(m, "Overflow: %d entries\n",
70441- atomic_read(&overflow_count));
70442+ atomic_read_unchecked(&overflow_count));
70443
70444 for (i = 0; i < nr_entries; i++) {
70445 entry = entries + i;
fe2de317 70446@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70447 {
70448 struct proc_dir_entry *pe;
70449
70450+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70451+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70452+#else
70453 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70454+#endif
70455 if (!pe)
70456 return -ENOMEM;
70457 return 0;
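
The overflow_count conversion above is a pattern repeated throughout this patch (blktrace, mmiotrace, trace_workqueue and memory-failure below use it too): counters that are purely statistical, where wrap-around is harmless, are switched from atomic_t to atomic_unchecked_t so that PaX's reference-count overflow detection (PAX_REFCOUNT) does not treat a wrap as an attack. The unchecked type and its helpers are defined by the arch atomic headers elsewhere in the patch; as an assumption, a simplified view is roughly:

typedef struct {
	int counter;
} atomic_unchecked_t;

/* same semantics as atomic_set()/atomic_read(), minus the overflow check
 * that PAX_REFCOUNT adds to the regular atomic_t operations;
 * atomic_inc_unchecked() is the arch's locked increment, likewise
 * without the check */
#define atomic_set_unchecked(v, i)	(((v)->counter) = (i))
#define atomic_read_unchecked(v)	(*(volatile int *)&(v)->counter)
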
fe2de317 70458diff --git a/kernel/timer.c b/kernel/timer.c
5e856224 70459index a297ffc..5e16b0b 100644
70460--- a/kernel/timer.c
70461+++ b/kernel/timer.c
5e856224 70462@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
ae4e228f
MT
70463 /*
70464 * This function runs timers and the timer-tq in bottom half context.
70465 */
70466-static void run_timer_softirq(struct softirq_action *h)
70467+static void run_timer_softirq(void)
70468 {
16454cff 70469 struct tvec_base *base = __this_cpu_read(tvec_bases);
58c5fc13 70470
fe2de317 70471diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5e856224 70472index cdea7b5..9b820d4 100644
fe2de317
MT
70473--- a/kernel/trace/blktrace.c
70474+++ b/kernel/trace/blktrace.c
4c928ab7 70475@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
8308f9c9
MT
70476 struct blk_trace *bt = filp->private_data;
70477 char buf[16];
70478
70479- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70480+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70481
70482 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70483 }
4c928ab7 70484@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
8308f9c9
MT
70485 return 1;
70486
70487 bt = buf->chan->private_data;
70488- atomic_inc(&bt->dropped);
70489+ atomic_inc_unchecked(&bt->dropped);
70490 return 0;
70491 }
70492
4c928ab7 70493@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
8308f9c9
MT
70494
70495 bt->dir = dir;
70496 bt->dev = dev;
70497- atomic_set(&bt->dropped, 0);
70498+ atomic_set_unchecked(&bt->dropped, 0);
70499
70500 ret = -EIO;
70501 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
fe2de317 70502diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
5e856224 70503index 683d559..d70d914 100644
fe2de317
MT
70504--- a/kernel/trace/ftrace.c
70505+++ b/kernel/trace/ftrace.c
5e856224 70506@@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
15a11c5b
MT
70507 if (unlikely(ftrace_disabled))
70508 return 0;
ae4e228f
MT
70509
70510+ ret = ftrace_arch_code_modify_prepare();
70511+ FTRACE_WARN_ON(ret);
70512+ if (ret)
70513+ return 0;
70514+
70515 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70516+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70517 if (ret) {
70518 ftrace_bug(ret, ip);
ae4e228f
MT
70519- return 0;
70520 }
70521- return 1;
70522+ return ret ? 0 : 1;
58c5fc13
MT
70523 }
70524
ae4e228f 70525 /*
5e856224 70526@@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66a7e928
MT
70527
70528 int
70529 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70530- void *data)
70531+ void *data)
70532 {
70533 struct ftrace_func_probe *entry;
70534 struct ftrace_page *pg;
fe2de317 70535diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5e856224 70536index c4579f1..6a439da 100644
fe2de317
MT
70537--- a/kernel/trace/trace.c
70538+++ b/kernel/trace/trace.c
5e856224 70539@@ -4258,10 +4258,9 @@ static const struct file_operations tracing_dyn_info_fops = {
ae4e228f
MT
70540 };
70541 #endif
58c5fc13 70542
ae4e228f
MT
70543-static struct dentry *d_tracer;
70544-
70545 struct dentry *tracing_init_dentry(void)
70546 {
70547+ static struct dentry *d_tracer;
70548 static int once;
70549
70550 if (d_tracer)
5e856224 70551@@ -4281,10 +4280,9 @@ struct dentry *tracing_init_dentry(void)
ae4e228f 70552 return d_tracer;
58c5fc13
MT
70553 }
70554
ae4e228f
MT
70555-static struct dentry *d_percpu;
70556-
70557 struct dentry *tracing_dentry_percpu(void)
70558 {
70559+ static struct dentry *d_percpu;
70560 static int once;
70561 struct dentry *d_tracer;
70562
fe2de317
MT
70563diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
70564index c212a7f..7b02394 100644
70565--- a/kernel/trace/trace_events.c
70566+++ b/kernel/trace/trace_events.c
70567@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
bc901d79
MT
70568 struct ftrace_module_file_ops {
70569 struct list_head list;
70570 struct module *mod;
16454cff
MT
70571- struct file_operations id;
70572- struct file_operations enable;
70573- struct file_operations format;
70574- struct file_operations filter;
16454cff
MT
70575 };
70576
70577 static struct ftrace_module_file_ops *
fe2de317 70578@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
15a11c5b
MT
70579
70580 file_ops->mod = mod;
66a7e928 70581
15a11c5b
MT
70582- file_ops->id = ftrace_event_id_fops;
70583- file_ops->id.owner = mod;
70584-
70585- file_ops->enable = ftrace_enable_fops;
70586- file_ops->enable.owner = mod;
70587-
70588- file_ops->filter = ftrace_event_filter_fops;
70589- file_ops->filter.owner = mod;
70590-
70591- file_ops->format = ftrace_event_format_fops;
70592- file_ops->format.owner = mod;
70593+ pax_open_kernel();
70594+ *(void **)&mod->trace_id.owner = mod;
70595+ *(void **)&mod->trace_enable.owner = mod;
70596+ *(void **)&mod->trace_filter.owner = mod;
70597+ *(void **)&mod->trace_format.owner = mod;
70598+ pax_close_kernel();
70599
70600 list_add(&file_ops->list, &ftrace_module_file_list);
70601
fe2de317 70602@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
15a11c5b
MT
70603
70604 for_each_event(call, start, end) {
70605 __trace_add_event_call(*call, mod,
70606- &file_ops->id, &file_ops->enable,
70607- &file_ops->filter, &file_ops->format);
70608+ &mod->trace_id, &mod->trace_enable,
70609+ &mod->trace_filter, &mod->trace_format);
70610 }
70611 }
70612
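
The trace_events.c hunks above drop the four writable struct file_operations copies that used to be kept per module and instead point __trace_add_event_call() at trace_id/trace_enable/trace_filter/trace_format slots, presumably added to struct module elsewhere in this patch. Only the .owner field still needs a per-module value, and it is written inside pax_open_kernel()/pax_close_kernel(), the patch's way of briefly lifting the write protection on otherwise read-only kernel data (the embedded file_operations are presumably covered by the patch's constification). The idiom, shown here on a made-up object (my_const_ops and my_set_owner are hypothetical names, not part of the patch):

static const struct file_operations my_const_ops = {
	.open = nonseekable_open,
};

static void my_set_owner(struct module *mod)
{
	pax_open_kernel();
	/* cast away const for this single write; the object is read-only
	 * again as soon as pax_close_kernel() runs */
	*(void **)&my_const_ops.owner = mod;
	pax_close_kernel();
}
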
70613diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
70614index 00d527c..7c5b1a3 100644
70615--- a/kernel/trace/trace_kprobe.c
70616+++ b/kernel/trace/trace_kprobe.c
70617@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
70618 long ret;
70619 int maxlen = get_rloc_len(*(u32 *)dest);
70620 u8 *dst = get_rloc_data(dest);
70621- u8 *src = addr;
70622+ const u8 __user *src = (const u8 __force_user *)addr;
70623 mm_segment_t old_fs = get_fs();
70624 if (!maxlen)
70625 return;
fe2de317 70626@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
70627 pagefault_disable();
70628 do
70629 ret = __copy_from_user_inatomic(dst++, src++, 1);
70630- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
70631+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
70632 dst[-1] = '\0';
70633 pagefault_enable();
70634 set_fs(old_fs);
fe2de317 70635@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
70636 ((u8 *)get_rloc_data(dest))[0] = '\0';
70637 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
70638 } else
70639- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
70640+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
70641 get_rloc_offs(*(u32 *)dest));
70642 }
70643 /* Return the length of string -- including null terminal byte */
fe2de317 70644@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
6e9df6a3
MT
70645 set_fs(KERNEL_DS);
70646 pagefault_disable();
70647 do {
70648- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
70649+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
70650 len++;
70651 } while (c && ret == 0 && len < MAX_STRING_SIZE);
70652 pagefault_enable();
fe2de317
MT
70653diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
70654index fd3c8aa..5f324a6 100644
70655--- a/kernel/trace/trace_mmiotrace.c
70656+++ b/kernel/trace/trace_mmiotrace.c
8308f9c9
MT
70657@@ -24,7 +24,7 @@ struct header_iter {
70658 static struct trace_array *mmio_trace_array;
70659 static bool overrun_detected;
70660 static unsigned long prev_overruns;
70661-static atomic_t dropped_count;
70662+static atomic_unchecked_t dropped_count;
70663
70664 static void mmio_reset_data(struct trace_array *tr)
70665 {
fe2de317 70666@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
8308f9c9
MT
70667
70668 static unsigned long count_overruns(struct trace_iterator *iter)
70669 {
70670- unsigned long cnt = atomic_xchg(&dropped_count, 0);
70671+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70672 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70673
70674 if (over > prev_overruns)
fe2de317 70675@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
8308f9c9
MT
70676 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70677 sizeof(*entry), 0, pc);
70678 if (!event) {
70679- atomic_inc(&dropped_count);
70680+ atomic_inc_unchecked(&dropped_count);
70681 return;
70682 }
70683 entry = ring_buffer_event_data(event);
fe2de317 70684@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
8308f9c9
MT
70685 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70686 sizeof(*entry), 0, pc);
70687 if (!event) {
70688- atomic_inc(&dropped_count);
70689+ atomic_inc_unchecked(&dropped_count);
70690 return;
70691 }
70692 entry = ring_buffer_event_data(event);
fe2de317 70693diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
5e856224 70694index d9c07f0..c1eeceb 100644
fe2de317
MT
70695--- a/kernel/trace/trace_output.c
70696+++ b/kernel/trace/trace_output.c
70697@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
ae4e228f 70698
58c5fc13
MT
70699 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70700 if (!IS_ERR(p)) {
70701- p = mangle_path(s->buffer + s->len, p, "\n");
70702+ p = mangle_path(s->buffer + s->len, p, "\n\\");
70703 if (p) {
70704 s->len = p - s->buffer;
70705 return 1;
fe2de317 70706diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
5e856224 70707index d4545f4..a9010a1 100644
fe2de317
MT
70708--- a/kernel/trace/trace_stack.c
70709+++ b/kernel/trace/trace_stack.c
5e856224 70710@@ -53,7 +53,7 @@ static inline void check_stack(void)
ae4e228f 70711 return;
58c5fc13 70712
ae4e228f
MT
70713 /* we do not handle interrupt stacks yet */
70714- if (!object_is_on_stack(&this_size))
70715+ if (!object_starts_on_stack(&this_size))
70716 return;
70717
70718 local_irq_save(flags);
fe2de317
MT
70719diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
70720index 209b379..7f76423 100644
70721--- a/kernel/trace/trace_workqueue.c
70722+++ b/kernel/trace/trace_workqueue.c
71d190be
MT
70723@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
70724 int cpu;
70725 pid_t pid;
70726 /* Can be inserted from interrupt or user context, need to be atomic */
70727- atomic_t inserted;
70728+ atomic_unchecked_t inserted;
70729 /*
70730 * Don't need to be atomic, works are serialized in a single workqueue thread
70731 * on a single CPU.
70732@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
70733 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
70734 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
70735 if (node->pid == wq_thread->pid) {
70736- atomic_inc(&node->inserted);
70737+ atomic_inc_unchecked(&node->inserted);
70738 goto found;
70739 }
70740 }
fe2de317 70741@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
71d190be
MT
70742 tsk = get_pid_task(pid, PIDTYPE_PID);
70743 if (tsk) {
70744 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
70745- atomic_read(&cws->inserted), cws->executed,
70746+ atomic_read_unchecked(&cws->inserted), cws->executed,
70747 tsk->comm);
70748 put_task_struct(tsk);
70749 }
fe2de317 70750diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
5e856224 70751index 8745ac7..d144e37 100644
fe2de317
MT
70752--- a/lib/Kconfig.debug
70753+++ b/lib/Kconfig.debug
4c928ab7 70754@@ -1103,6 +1103,7 @@ config LATENCYTOP
fe2de317
MT
70755 depends on DEBUG_KERNEL
70756 depends on STACKTRACE_SUPPORT
70757 depends on PROC_FS
70758+ depends on !GRKERNSEC_HIDESYM
4c928ab7 70759 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
fe2de317
MT
70760 select KALLSYMS
70761 select KALLSYMS_ALL
70762diff --git a/lib/bitmap.c b/lib/bitmap.c
4c928ab7 70763index 0d4a127..33a06c7 100644
fe2de317
MT
70764--- a/lib/bitmap.c
70765+++ b/lib/bitmap.c
70766@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
6e9df6a3
MT
70767 {
70768 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70769 u32 chunk;
4c928ab7 70770- const char __user __force *ubuf = (const char __user __force *)buf;
6e9df6a3
MT
70771+ const char __user *ubuf = (const char __force_user *)buf;
70772
70773 bitmap_zero(maskp, nmaskbits);
70774
fe2de317 70775@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
6e9df6a3
MT
70776 {
70777 if (!access_ok(VERIFY_READ, ubuf, ulen))
70778 return -EFAULT;
4c928ab7
MT
70779- return __bitmap_parse((const char __force *)ubuf,
70780+ return __bitmap_parse((const char __force_kernel *)ubuf,
70781 ulen, 1, maskp, nmaskbits);
6e9df6a3 70782
4c928ab7
MT
70783 }
70784@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
6e9df6a3
MT
70785 {
70786 unsigned a, b;
70787 int c, old_c, totaldigits;
4c928ab7 70788- const char __user __force *ubuf = (const char __user __force *)buf;
6e9df6a3
MT
70789+ const char __user *ubuf = (const char __force_user *)buf;
70790 int exp_digit, in_range;
70791
70792 totaldigits = c = 0;
4c928ab7 70793@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
6e9df6a3
MT
70794 {
70795 if (!access_ok(VERIFY_READ, ubuf, ulen))
70796 return -EFAULT;
4c928ab7 70797- return __bitmap_parselist((const char __force *)ubuf,
6e9df6a3
MT
70798+ return __bitmap_parselist((const char __force_kernel *)ubuf,
70799 ulen, 1, maskp, nmaskbits);
70800 }
70801 EXPORT_SYMBOL(bitmap_parselist_user);
fe2de317 70802diff --git a/lib/bug.c b/lib/bug.c
5e856224 70803index a28c141..2bd3d95 100644
fe2de317
MT
70804--- a/lib/bug.c
70805+++ b/lib/bug.c
70806@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
ae4e228f
MT
70807 return BUG_TRAP_TYPE_NONE;
70808
70809 bug = find_bug(bugaddr);
70810+ if (!bug)
70811+ return BUG_TRAP_TYPE_NONE;
70812
6892158b
MT
70813 file = NULL;
70814 line = 0;
fe2de317 70815diff --git a/lib/debugobjects.c b/lib/debugobjects.c
5e856224 70816index 0ab9ae8..f01ceca 100644
fe2de317
MT
70817--- a/lib/debugobjects.c
70818+++ b/lib/debugobjects.c
5e856224 70819@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
ae4e228f
MT
70820 if (limit > 4)
70821 return;
70822
70823- is_on_stack = object_is_on_stack(addr);
70824+ is_on_stack = object_starts_on_stack(addr);
70825 if (is_on_stack == onstack)
70826 return;
70827
fe2de317 70828diff --git a/lib/devres.c b/lib/devres.c
5e856224 70829index 9676617..5149e15 100644
fe2de317
MT
70830--- a/lib/devres.c
70831+++ b/lib/devres.c
6e9df6a3
MT
70832@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
70833 void devm_iounmap(struct device *dev, void __iomem *addr)
70834 {
70835 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70836- (void *)addr));
70837+ (void __force *)addr));
70838 iounmap(addr);
70839 }
70840 EXPORT_SYMBOL(devm_iounmap);
5e856224 70841@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
6e9df6a3
MT
70842 {
70843 ioport_unmap(addr);
70844 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70845- devm_ioport_map_match, (void *)addr));
70846+ devm_ioport_map_match, (void __force *)addr));
70847 }
70848 EXPORT_SYMBOL(devm_ioport_unmap);
70849
fe2de317 70850diff --git a/lib/dma-debug.c b/lib/dma-debug.c
4c928ab7 70851index fea790a..ebb0e82 100644
fe2de317
MT
70852--- a/lib/dma-debug.c
70853+++ b/lib/dma-debug.c
4c928ab7 70854@@ -925,7 +925,7 @@ out:
58c5fc13 70855
ae4e228f
MT
70856 static void check_for_stack(struct device *dev, void *addr)
70857 {
70858- if (object_is_on_stack(addr))
70859+ if (object_starts_on_stack(addr))
70860 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70861 "stack [addr=%p]\n", addr);
70862 }
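
Both the debugobjects.c and dma-debug.c hunks above switch from object_is_on_stack() to object_starts_on_stack(). The renamed helper, introduced elsewhere in this patch, answers the narrower question of whether the object's starting address lies within the current task's stack, which is all these callers actually need; assuming it mirrors the old helper, it amounts to:

static inline int object_starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);

	return stack <= obj && obj < stack + THREAD_SIZE;
}
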
70863diff --git a/lib/extable.c b/lib/extable.c
70864index 4cac81e..63e9b8f 100644
70865--- a/lib/extable.c
70866+++ b/lib/extable.c
15a11c5b
MT
70867@@ -13,6 +13,7 @@
70868 #include <linux/init.h>
70869 #include <linux/sort.h>
70870 #include <asm/uaccess.h>
70871+#include <asm/pgtable.h>
70872
70873 #ifndef ARCH_HAS_SORT_EXTABLE
70874 /*
fe2de317 70875@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
15a11c5b
MT
70876 void sort_extable(struct exception_table_entry *start,
70877 struct exception_table_entry *finish)
70878 {
70879+ pax_open_kernel();
70880 sort(start, finish - start, sizeof(struct exception_table_entry),
70881 cmp_ex, NULL);
70882+ pax_close_kernel();
70883 }
70884
70885 #ifdef CONFIG_MODULES
fe2de317
MT
70886diff --git a/lib/inflate.c b/lib/inflate.c
70887index 013a761..c28f3fc 100644
70888--- a/lib/inflate.c
70889+++ b/lib/inflate.c
6892158b 70890@@ -269,7 +269,7 @@ static void free(void *where)
58c5fc13
MT
70891 malloc_ptr = free_mem_ptr;
70892 }
70893 #else
70894-#define malloc(a) kmalloc(a, GFP_KERNEL)
70895+#define malloc(a) kmalloc((a), GFP_KERNEL)
70896 #define free(a) kfree(a)
70897 #endif
70898
5e856224
MT
70899diff --git a/lib/ioremap.c b/lib/ioremap.c
70900index da4e2ad..6373b5f 100644
70901--- a/lib/ioremap.c
70902+++ b/lib/ioremap.c
70903@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
70904 unsigned long next;
70905
70906 phys_addr -= addr;
70907- pmd = pmd_alloc(&init_mm, pud, addr);
70908+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
70909 if (!pmd)
70910 return -ENOMEM;
70911 do {
70912@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
70913 unsigned long next;
70914
70915 phys_addr -= addr;
70916- pud = pud_alloc(&init_mm, pgd, addr);
70917+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
70918 if (!pud)
70919 return -ENOMEM;
70920 do {
4c928ab7
MT
70921diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70922index bd2bea9..6b3c95e 100644
70923--- a/lib/is_single_threaded.c
70924+++ b/lib/is_single_threaded.c
70925@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70926 struct task_struct *p, *t;
70927 bool ret;
70928
70929+ if (!mm)
70930+ return true;
70931+
70932 if (atomic_read(&task->signal->live) != 1)
70933 return false;
70934
fe2de317 70935diff --git a/lib/radix-tree.c b/lib/radix-tree.c
5e856224 70936index dc63d08..95ae14a 100644
fe2de317
MT
70937--- a/lib/radix-tree.c
70938+++ b/lib/radix-tree.c
5e856224 70939@@ -78,7 +78,7 @@ struct radix_tree_preload {
58c5fc13
MT
70940 int nr;
70941 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70942 };
70943-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70944+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70945
bc901d79 70946 static inline void *ptr_to_indirect(void *ptr)
58c5fc13 70947 {
fe2de317 70948diff --git a/lib/vsprintf.c b/lib/vsprintf.c
5e856224 70949index 38e612e..4fb99a8 100644
fe2de317
MT
70950--- a/lib/vsprintf.c
70951+++ b/lib/vsprintf.c
bc901d79
MT
70952@@ -16,6 +16,9 @@
70953 * - scnprintf and vscnprintf
70954 */
70955
70956+#ifdef CONFIG_GRKERNSEC_HIDESYM
70957+#define __INCLUDED_BY_HIDESYM 1
70958+#endif
70959 #include <stdarg.h>
70960 #include <linux/module.h>
70961 #include <linux/types.h>
4c928ab7 70962@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
bc901d79 70963 char sym[KSYM_SYMBOL_LEN];
66a7e928
MT
70964 if (ext == 'B')
70965 sprint_backtrace(sym, value);
70966- else if (ext != 'f' && ext != 's')
70967+ else if (ext != 'f' && ext != 's' && ext != 'a')
bc901d79
MT
70968 sprint_symbol(sym, value);
70969 else
70970 kallsyms_lookup(value, NULL, NULL, NULL, sym);
5e856224
MT
70971@@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70972 return number(buf, end, *(const netdev_features_t *)addr, spec);
16454cff
MT
70973 }
70974
70975+#ifdef CONFIG_GRKERNSEC_HIDESYM
66a7e928 70976+int kptr_restrict __read_mostly = 2;
16454cff 70977+#else
66a7e928 70978 int kptr_restrict __read_mostly;
16454cff
MT
70979+#endif
70980
70981 /*
70982 * Show a '%p' thing. A kernel extension is that the '%p' is followed
5e856224 70983@@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
bc901d79
MT
70984 * - 'S' For symbolic direct pointers with offset
70985 * - 's' For symbolic direct pointers without offset
66a7e928 70986 * - 'B' For backtraced symbolic direct pointers with offset
bc901d79
MT
70987+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70988+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70989 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70990 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70991 * - 'M' For a 6-byte MAC address, it prints the address in the
5e856224 70992@@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
bc901d79 70993 {
66a7e928 70994 if (!ptr && *fmt != 'K') {
bc901d79
MT
70995 /*
70996- * Print (null) with the same width as a pointer so it makes
70997+ * Print (nil) with the same width as a pointer so it makes
70998 * tabular output look nice.
70999 */
71000 if (spec.field_width == -1)
71001 spec.field_width = 2 * sizeof(void *);
6892158b
MT
71002- return string(buf, end, "(null)", spec);
71003+ return string(buf, end, "(nil)", spec);
bc901d79 71004 }
6892158b
MT
71005
71006 switch (*fmt) {
5e856224 71007@@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
bc901d79
MT
71008 /* Fallthrough */
71009 case 'S':
71010 case 's':
71011+#ifdef CONFIG_GRKERNSEC_HIDESYM
71012+ break;
71013+#else
71014+ return symbol_string(buf, end, ptr, spec, *fmt);
71015+#endif
71016+ case 'A':
71017+ case 'a':
66a7e928 71018 case 'B':
bc901d79
MT
71019 return symbol_string(buf, end, ptr, spec, *fmt);
71020 case 'R':
5e856224 71021@@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
bc901d79
MT
71022 typeof(type) value; \
71023 if (sizeof(type) == 8) { \
71024 args = PTR_ALIGN(args, sizeof(u32)); \
71025- *(u32 *)&value = *(u32 *)args; \
71026- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
71027+ *(u32 *)&value = *(const u32 *)args; \
71028+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
71029 } else { \
71030 args = PTR_ALIGN(args, sizeof(type)); \
71031- value = *(typeof(type) *)args; \
71032+ value = *(const typeof(type) *)args; \
71033 } \
71034 args += sizeof(type); \
71035 value; \
5e856224 71036@@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
bc901d79
MT
71037 case FORMAT_TYPE_STR: {
71038 const char *str_arg = args;
71039 args += strlen(str_arg) + 1;
71040- str = string(str, end, (char *)str_arg, spec);
71041+ str = string(str, end, str_arg, spec);
71042 break;
71043 }
71044
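
In short, the vsprintf.c changes above make CONFIG_GRKERNSEC_HIDESYM default kptr_restrict to 2 (plain %pK values are hidden from everybody, not just non-root), stop %pS/%ps from resolving symbols under that option (they fall back to plain pointer output), and add %pA/%pa as explicit opt-ins that keep printing symbolic names with and without offset respectively; kmemleak below is converted to %pA, for example. An illustrative call site, valid only with this patch applied (handler is a placeholder name):

pr_info("callback registered at %pA\n", (void *)handler);	/* symbol+offset */
pr_info("callback registered at %pa\n", (void *)handler);	/* symbol only */
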
71045diff --git a/localversion-grsec b/localversion-grsec
71046new file mode 100644
71047index 0000000..7cd6065
71048--- /dev/null
71049+++ b/localversion-grsec
58c5fc13
MT
71050@@ -0,0 +1 @@
71051+-grsec
fe2de317 71052diff --git a/mm/Kconfig b/mm/Kconfig
5e856224 71053index e338407..49b5b7a 100644
fe2de317
MT
71054--- a/mm/Kconfig
71055+++ b/mm/Kconfig
5e856224 71056@@ -247,10 +247,10 @@ config KSM
fe2de317 71057 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
15a11c5b 71058
fe2de317
MT
71059 config DEFAULT_MMAP_MIN_ADDR
71060- int "Low address space to protect from user allocation"
71061+ int "Low address space to protect from user allocation"
71062 depends on MMU
71063- default 4096
71064- help
71065+ default 65536
71066+ help
71067 This is the portion of low virtual memory which should be protected
71068 from userspace allocation. Keeping a user from writing to low pages
71069 can help reduce the impact of kernel NULL pointer bugs.
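
The Kconfig hunk above raises the default DEFAULT_MMAP_MIN_ADDR from 4096 to 65536, so the lowest 64 KiB of the address space can no longer be mapped by unprivileged processes; that blunts kernel NULL-pointer-dereference exploits which rely on mapping page zero with attacker-controlled data. A quick userspace check of the effect (illustrative; the exact errno depends on the running kernel's vm.mmap_min_addr and LSM setup):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* try to place a mapping well below 64 KiB */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("low mapping rejected: %s\n", strerror(errno));
	else
		printf("low mapping allowed at %p\n", p);
	return 0;
}
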
71070diff --git a/mm/filemap.c b/mm/filemap.c
5e856224 71071index b662757..3081ddd 100644
fe2de317
MT
71072--- a/mm/filemap.c
71073+++ b/mm/filemap.c
4c928ab7 71074@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
58c5fc13
MT
71075 struct address_space *mapping = file->f_mapping;
71076
71077 if (!mapping->a_ops->readpage)
71078- return -ENOEXEC;
71079+ return -ENODEV;
71080 file_accessed(file);
71081 vma->vm_ops = &generic_file_vm_ops;
71082 vma->vm_flags |= VM_CAN_NONLINEAR;
4c928ab7 71083@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
58c5fc13
MT
71084 *pos = i_size_read(inode);
71085
71086 if (limit != RLIM_INFINITY) {
71087+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71088 if (*pos >= limit) {
71089 send_sig(SIGXFSZ, current, 0);
71090 return -EFBIG;
fe2de317 71091diff --git a/mm/fremap.c b/mm/fremap.c
4c928ab7 71092index 9ed4fd4..c42648d 100644
fe2de317
MT
71093--- a/mm/fremap.c
71094+++ b/mm/fremap.c
4c928ab7 71095@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
58c5fc13
MT
71096 retry:
71097 vma = find_vma(mm, start);
71098
71099+#ifdef CONFIG_PAX_SEGMEXEC
71100+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71101+ goto out;
71102+#endif
71103+
71104 /*
71105 * Make sure the vma is shared, that it supports prefaulting,
71106 * and that the remapped range is valid and fully within
fe2de317 71107diff --git a/mm/highmem.c b/mm/highmem.c
4c928ab7 71108index 57d82c6..e9e0552 100644
fe2de317
MT
71109--- a/mm/highmem.c
71110+++ b/mm/highmem.c
bc901d79 71111@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
58c5fc13
MT
71112 * So no dangers, even with speculative execution.
71113 */
71114 page = pte_page(pkmap_page_table[i]);
ae4e228f 71115+ pax_open_kernel();
58c5fc13
MT
71116 pte_clear(&init_mm, (unsigned long)page_address(page),
71117 &pkmap_page_table[i]);
ae4e228f
MT
71118-
71119+ pax_close_kernel();
58c5fc13
MT
71120 set_page_address(page, NULL);
71121 need_flush = 1;
71122 }
bc901d79 71123@@ -186,9 +187,11 @@ start:
58c5fc13
MT
71124 }
71125 }
71126 vaddr = PKMAP_ADDR(last_pkmap_nr);
ae4e228f
MT
71127+
71128+ pax_open_kernel();
58c5fc13
MT
71129 set_pte_at(&init_mm, vaddr,
71130 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
ae4e228f
MT
71131-
71132+ pax_close_kernel();
58c5fc13
MT
71133 pkmap_count[last_pkmap_nr] = 1;
71134 set_page_address(page, (void *)vaddr);
58c5fc13 71135
fe2de317 71136diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5e856224 71137index 8f7fc39..69bf1e9 100644
fe2de317
MT
71138--- a/mm/huge_memory.c
71139+++ b/mm/huge_memory.c
5e856224 71140@@ -733,7 +733,7 @@ out:
66a7e928
MT
71141 * run pte_offset_map on the pmd, if an huge pmd could
71142 * materialize from under us from a different thread.
71143 */
71144- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
71145+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71146 return VM_FAULT_OOM;
71147 /* if an huge pmd materialized from under us just retry later */
71148 if (unlikely(pmd_trans_huge(*pmd)))
fe2de317 71149diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5e856224 71150index 24b1787..e0fbc01 100644
fe2de317
MT
71151--- a/mm/hugetlb.c
71152+++ b/mm/hugetlb.c
5e856224 71153@@ -2425,6 +2425,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71154 return 1;
71155 }
71156
71157+#ifdef CONFIG_PAX_SEGMEXEC
71158+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71159+{
71160+ struct mm_struct *mm = vma->vm_mm;
71161+ struct vm_area_struct *vma_m;
71162+ unsigned long address_m;
71163+ pte_t *ptep_m;
71164+
71165+ vma_m = pax_find_mirror_vma(vma);
71166+ if (!vma_m)
71167+ return;
71168+
71169+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71170+ address_m = address + SEGMEXEC_TASK_SIZE;
71171+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71172+ get_page(page_m);
6892158b 71173+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
58c5fc13
MT
71174+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71175+}
71176+#endif
71177+
6892158b
MT
71178 /*
71179 * Hugetlb_cow() should be called with page lock of the original hugepage held.
5e856224
MT
71180 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
71181@@ -2538,6 +2559,11 @@ retry_avoidcopy:
58c5fc13 71182 make_huge_pte(vma, new_page, 1));
6892158b
MT
71183 page_remove_rmap(old_page);
71184 hugepage_add_new_anon_rmap(new_page, vma, address);
58c5fc13
MT
71185+
71186+#ifdef CONFIG_PAX_SEGMEXEC
71187+ pax_mirror_huge_pte(vma, address, new_page);
71188+#endif
71189+
71190 /* Make the old page be freed below */
71191 new_page = old_page;
6892158b 71192 mmu_notifier_invalidate_range_end(mm,
5e856224 71193@@ -2692,6 +2718,10 @@ retry:
58c5fc13
MT
71194 && (vma->vm_flags & VM_SHARED)));
71195 set_huge_pte_at(mm, address, ptep, new_pte);
71196
71197+#ifdef CONFIG_PAX_SEGMEXEC
71198+ pax_mirror_huge_pte(vma, address, page);
71199+#endif
71200+
71201 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71202 /* Optimization, do the COW without a second fault */
71203 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
5e856224 71204@@ -2721,6 +2751,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71205 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71206 struct hstate *h = hstate_vma(vma);
71207
71208+#ifdef CONFIG_PAX_SEGMEXEC
71209+ struct vm_area_struct *vma_m;
6892158b 71210+#endif
58c5fc13 71211+
5e856224
MT
71212 address &= huge_page_mask(h);
71213
6892158b 71214 ptep = huge_pte_offset(mm, address);
5e856224 71215@@ -2734,6 +2768,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
bc901d79 71216 VM_FAULT_SET_HINDEX(h - hstates);
6892158b
MT
71217 }
71218
71219+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13
MT
71220+ vma_m = pax_find_mirror_vma(vma);
71221+ if (vma_m) {
71222+ unsigned long address_m;
71223+
71224+ if (vma->vm_start > vma_m->vm_start) {
71225+ address_m = address;
71226+ address -= SEGMEXEC_TASK_SIZE;
71227+ vma = vma_m;
71228+ h = hstate_vma(vma);
71229+ } else
71230+ address_m = address + SEGMEXEC_TASK_SIZE;
71231+
71232+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71233+ return VM_FAULT_OOM;
71234+ address_m &= HPAGE_MASK;
71235+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71236+ }
71237+#endif
71238+
71239 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71240 if (!ptep)
71241 return VM_FAULT_OOM;
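
The hugetlb.c additions above are part of the PAX_SEGMEXEC handling that dominates the mm/ hunks below: when a VMA has a mirror (pax_find_mirror_vma() returns non-NULL), every mapping established in the lower half of the address space is repeated at the same offset plus SEGMEXEC_TASK_SIZE, so the segment used to enforce non-executable pages stays coherent with the data mapping. The address arithmetic is the same everywhere in these hunks (pax_mirror_address is a made-up name for the sketch):

/* illustrative only; every caller first checks
 * BUG_ON(address >= SEGMEXEC_TASK_SIZE) as in the code above */
static unsigned long pax_mirror_address(unsigned long address)
{
	return address + SEGMEXEC_TASK_SIZE;
}
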
71242diff --git a/mm/internal.h b/mm/internal.h
71243index 2189af4..f2ca332 100644
71244--- a/mm/internal.h
71245+++ b/mm/internal.h
71246@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
15a11c5b
MT
71247 * in mm/page_alloc.c
71248 */
71249 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71250+extern void free_compound_page(struct page *page);
71251 extern void prep_compound_page(struct page *page, unsigned long order);
71252 #ifdef CONFIG_MEMORY_FAILURE
71253 extern bool is_free_buddy_page(struct page *page);
fe2de317 71254diff --git a/mm/kmemleak.c b/mm/kmemleak.c
5e856224 71255index 45eb621..6ccd8ea 100644
fe2de317
MT
71256--- a/mm/kmemleak.c
71257+++ b/mm/kmemleak.c
5e856224 71258@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
bc901d79
MT
71259
71260 for (i = 0; i < object->trace_len; i++) {
71261 void *ptr = (void *)object->trace[i];
71262- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71263+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71264 }
71265 }
71266
fe2de317 71267diff --git a/mm/maccess.c b/mm/maccess.c
4c928ab7 71268index d53adf9..03a24bf 100644
fe2de317
MT
71269--- a/mm/maccess.c
71270+++ b/mm/maccess.c
71271@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
6e9df6a3
MT
71272 set_fs(KERNEL_DS);
71273 pagefault_disable();
71274 ret = __copy_from_user_inatomic(dst,
71275- (__force const void __user *)src, size);
71276+ (const void __force_user *)src, size);
71277 pagefault_enable();
71278 set_fs(old_fs);
71279
fe2de317 71280@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
6e9df6a3
MT
71281
71282 set_fs(KERNEL_DS);
71283 pagefault_disable();
71284- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71285+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71286 pagefault_enable();
71287 set_fs(old_fs);
71288
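
mm/maccess.c above, like lib/bitmap.c and trace_kprobe.c earlier, replaces the plain __force address-space casts with the patch's __force_user/__force_kernel spellings: with PaX keeping user and kernel address spaces strictly separate (UDEREF), the direction of the cast matters, so the patch records it explicitly for sparse. The annotations are defined in the patched include/linux/compiler.h; my assumption of roughly how they expand when the checker is running:

#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
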
71289diff --git a/mm/madvise.c b/mm/madvise.c
71290index 74bf193..feb6fd3 100644
71291--- a/mm/madvise.c
71292+++ b/mm/madvise.c
71293@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
58c5fc13 71294 pgoff_t pgoff;
ae4e228f 71295 unsigned long new_flags = vma->vm_flags;
58c5fc13
MT
71296
71297+#ifdef CONFIG_PAX_SEGMEXEC
71298+ struct vm_area_struct *vma_m;
71299+#endif
71300+
71301 switch (behavior) {
71302 case MADV_NORMAL:
71303 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
16454cff 71304@@ -110,6 +114,13 @@ success:
58c5fc13
MT
71305 /*
71306 * vm_flags is protected by the mmap_sem held in write mode.
71307 */
71308+
71309+#ifdef CONFIG_PAX_SEGMEXEC
71310+ vma_m = pax_find_mirror_vma(vma);
71311+ if (vma_m)
71312+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71313+#endif
71314+
71315 vma->vm_flags = new_flags;
71316
71317 out:
fe2de317 71318@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
ae4e228f
MT
71319 struct vm_area_struct ** prev,
71320 unsigned long start, unsigned long end)
71321 {
58c5fc13
MT
71322+
71323+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f
MT
71324+ struct vm_area_struct *vma_m;
71325+#endif
58c5fc13 71326+
ae4e228f
MT
71327 *prev = vma;
71328 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71329 return -EINVAL;
fe2de317 71330@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
ae4e228f
MT
71331 zap_page_range(vma, start, end - start, &details);
71332 } else
71333 zap_page_range(vma, start, end - start, NULL);
71334+
71335+#ifdef CONFIG_PAX_SEGMEXEC
71336+ vma_m = pax_find_mirror_vma(vma);
71337+ if (vma_m) {
71338+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71339+ struct zap_details details = {
71340+ .nonlinear_vma = vma_m,
71341+ .last_index = ULONG_MAX,
71342+ };
71343+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71344+ } else
71345+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71346+ }
58c5fc13
MT
71347+#endif
71348+
ae4e228f
MT
71349 return 0;
71350 }
58c5fc13 71351
fe2de317 71352@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
58c5fc13
MT
71353 if (end < start)
71354 goto out;
71355
71356+#ifdef CONFIG_PAX_SEGMEXEC
71357+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71358+ if (end > SEGMEXEC_TASK_SIZE)
71359+ goto out;
71360+ } else
71361+#endif
71362+
71363+ if (end > TASK_SIZE)
71364+ goto out;
71365+
71366 error = 0;
71367 if (end == start)
71368 goto out;
fe2de317 71369diff --git a/mm/memory-failure.c b/mm/memory-failure.c
5e856224 71370index 56080ea..115071e 100644
fe2de317
MT
71371--- a/mm/memory-failure.c
71372+++ b/mm/memory-failure.c
4c928ab7 71373@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
fe2de317
MT
71374
71375 int sysctl_memory_failure_recovery __read_mostly = 1;
71376
71377-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71378+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71379
71380 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71381
4c928ab7 71382@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
fe2de317
MT
71383 si.si_signo = SIGBUS;
71384 si.si_errno = 0;
71385 si.si_code = BUS_MCEERR_AO;
71386- si.si_addr = (void *)addr;
71387+ si.si_addr = (void __user *)addr;
71388 #ifdef __ARCH_SI_TRAPNO
71389 si.si_trapno = trapno;
71390 #endif
4c928ab7 71391@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
71392 }
71393
71394 nr_pages = 1 << compound_trans_order(hpage);
71395- atomic_long_add(nr_pages, &mce_bad_pages);
71396+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71397
71398 /*
71399 * We need/can do nothing about count=0 pages.
4c928ab7 71400@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
71401 if (!PageHWPoison(hpage)
71402 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71403 || (p != hpage && TestSetPageHWPoison(hpage))) {
71404- atomic_long_sub(nr_pages, &mce_bad_pages);
71405+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71406 return 0;
71407 }
71408 set_page_hwpoison_huge_page(hpage);
4c928ab7 71409@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
71410 }
71411 if (hwpoison_filter(p)) {
71412 if (TestClearPageHWPoison(p))
71413- atomic_long_sub(nr_pages, &mce_bad_pages);
71414+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71415 unlock_page(hpage);
71416 put_page(hpage);
71417 return 0;
4c928ab7 71418@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
fe2de317
MT
71419 return 0;
71420 }
71421 if (TestClearPageHWPoison(p))
71422- atomic_long_sub(nr_pages, &mce_bad_pages);
71423+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71424 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71425 return 0;
71426 }
4c928ab7 71427@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
fe2de317
MT
71428 */
71429 if (TestClearPageHWPoison(page)) {
71430 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71431- atomic_long_sub(nr_pages, &mce_bad_pages);
71432+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71433 freeit = 1;
71434 if (PageHuge(page))
71435 clear_page_hwpoison_huge_page(page);
4c928ab7 71436@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
fe2de317
MT
71437 }
71438 done:
71439 if (!PageHWPoison(hpage))
71440- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71441+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71442 set_page_hwpoison_huge_page(hpage);
71443 dequeue_hwpoisoned_huge_page(hpage);
71444 /* keep elevated page count for bad page */
4c928ab7 71445@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
fe2de317
MT
71446 return ret;
71447
71448 done:
71449- atomic_long_add(1, &mce_bad_pages);
71450+ atomic_long_add_unchecked(1, &mce_bad_pages);
71451 SetPageHWPoison(page);
71452 /* keep elevated page count for bad page */
71453 return ret;
71454diff --git a/mm/memory.c b/mm/memory.c
5e856224 71455index 10b4dda..06857f3 100644
fe2de317
MT
71456--- a/mm/memory.c
71457+++ b/mm/memory.c
71458@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
df50ba0c
MT
71459 return;
71460
71461 pmd = pmd_offset(pud, start);
71462+
71463+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71464 pud_clear(pud);
71465 pmd_free_tlb(tlb, pmd, start);
71466+#endif
71467+
71468 }
71469
71470 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
fe2de317 71471@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
6892158b 71472 if (end - 1 > ceiling - 1)
df50ba0c
MT
71473 return;
71474
df50ba0c 71475+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
6892158b 71476 pud = pud_offset(pgd, start);
df50ba0c
MT
71477 pgd_clear(pgd);
71478 pud_free_tlb(tlb, pud, start);
71479+#endif
71480+
71481 }
71482
71483 /*
5e856224 71484@@ -1593,12 +1600,6 @@ no_page_table:
71d190be
MT
71485 return page;
71486 }
71487
71488-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71489-{
66a7e928
MT
71490- return stack_guard_page_start(vma, addr) ||
71491- stack_guard_page_end(vma, addr+PAGE_SIZE);
71d190be
MT
71492-}
71493-
66a7e928
MT
71494 /**
71495 * __get_user_pages() - pin user pages in memory
71496 * @tsk: task_struct of target task
5e856224 71497@@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
ae4e228f 71498 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
58c5fc13
MT
71499 i = 0;
71500
71501- do {
71502+ while (nr_pages) {
71503 struct vm_area_struct *vma;
58c5fc13
MT
71504
71505- vma = find_extend_vma(mm, start);
71506+ vma = find_vma(mm, start);
66a7e928 71507 if (!vma && in_gate_area(mm, start)) {
58c5fc13 71508 unsigned long pg = start & PAGE_MASK;
71d190be 71509 pgd_t *pgd;
5e856224 71510@@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71d190be 71511 goto next_page;
58c5fc13
MT
71512 }
71513
71514- if (!vma ||
71515+ if (!vma || start < vma->vm_start ||
71516 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
ae4e228f 71517 !(vm_flags & vma->vm_flags))
58c5fc13 71518 return i ? : -EFAULT;
5e856224 71519@@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
66a7e928
MT
71520 int ret;
71521 unsigned int fault_flags = 0;
71522
71523- /* For mlock, just skip the stack guard page. */
71524- if (foll_flags & FOLL_MLOCK) {
71525- if (stack_guard_page(vma, start))
71526- goto next_page;
71527- }
71528 if (foll_flags & FOLL_WRITE)
71529 fault_flags |= FAULT_FLAG_WRITE;
71530 if (nonblocking)
5e856224 71531@@ -1827,7 +1823,7 @@ next_page:
58c5fc13
MT
71532 start += PAGE_SIZE;
71533 nr_pages--;
71534 } while (nr_pages && start < vma->vm_end);
71535- } while (nr_pages);
71536+ }
71537 return i;
71538 }
66a7e928 71539 EXPORT_SYMBOL(__get_user_pages);
5e856224 71540@@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
6892158b
MT
71541 page_add_file_rmap(page);
71542 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71543
71544+#ifdef CONFIG_PAX_SEGMEXEC
71545+ pax_mirror_file_pte(vma, addr, page, ptl);
71546+#endif
71547+
71548 retval = 0;
71549 pte_unmap_unlock(pte, ptl);
71550 return retval;
5e856224 71551@@ -2068,10 +2068,22 @@ out:
6892158b
MT
71552 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71553 struct page *page)
71554 {
71555+
71556+#ifdef CONFIG_PAX_SEGMEXEC
71557+ struct vm_area_struct *vma_m;
71558+#endif
71559+
71560 if (addr < vma->vm_start || addr >= vma->vm_end)
71561 return -EFAULT;
71562 if (!page_count(page))
71563 return -EINVAL;
71564+
71565+#ifdef CONFIG_PAX_SEGMEXEC
71566+ vma_m = pax_find_mirror_vma(vma);
71567+ if (vma_m)
71568+ vma_m->vm_flags |= VM_INSERTPAGE;
71569+#endif
71570+
71571 vma->vm_flags |= VM_INSERTPAGE;
71572 return insert_page(vma, addr, page, vma->vm_page_prot);
71573 }
5e856224 71574@@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
6892158b
MT
71575 unsigned long pfn)
71576 {
71577 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71578+ BUG_ON(vma->vm_mirror);
71579
71580 if (addr < vma->vm_start || addr >= vma->vm_end)
71581 return -EFAULT;
5e856224
MT
71582@@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
71583
71584 BUG_ON(pud_huge(*pud));
71585
71586- pmd = pmd_alloc(mm, pud, addr);
71587+ pmd = (mm == &init_mm) ?
71588+ pmd_alloc_kernel(mm, pud, addr) :
71589+ pmd_alloc(mm, pud, addr);
71590 if (!pmd)
71591 return -ENOMEM;
71592 do {
71593@@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
71594 unsigned long next;
71595 int err;
71596
71597- pud = pud_alloc(mm, pgd, addr);
71598+ pud = (mm == &init_mm) ?
71599+ pud_alloc_kernel(mm, pgd, addr) :
71600+ pud_alloc(mm, pgd, addr);
71601 if (!pud)
71602 return -ENOMEM;
71603 do {
71604@@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
58c5fc13
MT
71605 copy_user_highpage(dst, src, va, vma);
71606 }
71607
71608+#ifdef CONFIG_PAX_SEGMEXEC
71609+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71610+{
71611+ struct mm_struct *mm = vma->vm_mm;
71612+ spinlock_t *ptl;
71613+ pte_t *pte, entry;
71614+
71615+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71616+ entry = *pte;
71617+ if (!pte_present(entry)) {
71618+ if (!pte_none(entry)) {
71619+ BUG_ON(pte_file(entry));
71620+ free_swap_and_cache(pte_to_swp_entry(entry));
71621+ pte_clear_not_present_full(mm, address, pte, 0);
71622+ }
71623+ } else {
71624+ struct page *page;
71625+
71626+ flush_cache_page(vma, address, pte_pfn(entry));
71627+ entry = ptep_clear_flush(vma, address, pte);
71628+ BUG_ON(pte_dirty(entry));
71629+ page = vm_normal_page(vma, address, entry);
71630+ if (page) {
71631+ update_hiwater_rss(mm);
71632+ if (PageAnon(page))
df50ba0c 71633+ dec_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 71634+ else
df50ba0c 71635+ dec_mm_counter_fast(mm, MM_FILEPAGES);
58c5fc13
MT
71636+ page_remove_rmap(page);
71637+ page_cache_release(page);
71638+ }
71639+ }
71640+ pte_unmap_unlock(pte, ptl);
71641+}
71642+
71643+/* PaX: if vma is mirrored, synchronize the mirror's PTE
71644+ *
71645+ * the ptl of the lower mapped page is held on entry and is not released on exit
71646+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71647+ */
71648+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71649+{
71650+ struct mm_struct *mm = vma->vm_mm;
71651+ unsigned long address_m;
71652+ spinlock_t *ptl_m;
71653+ struct vm_area_struct *vma_m;
71654+ pmd_t *pmd_m;
71655+ pte_t *pte_m, entry_m;
71656+
71657+ BUG_ON(!page_m || !PageAnon(page_m));
71658+
71659+ vma_m = pax_find_mirror_vma(vma);
71660+ if (!vma_m)
71661+ return;
71662+
71663+ BUG_ON(!PageLocked(page_m));
71664+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71665+ address_m = address + SEGMEXEC_TASK_SIZE;
71666+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 71667+ pte_m = pte_offset_map(pmd_m, address_m);
58c5fc13
MT
71668+ ptl_m = pte_lockptr(mm, pmd_m);
71669+ if (ptl != ptl_m) {
71670+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71671+ if (!pte_none(*pte_m))
71672+ goto out;
71673+ }
71674+
71675+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71676+ page_cache_get(page_m);
71677+ page_add_anon_rmap(page_m, vma_m, address_m);
df50ba0c 71678+ inc_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13
MT
71679+ set_pte_at(mm, address_m, pte_m, entry_m);
71680+ update_mmu_cache(vma_m, address_m, entry_m);
71681+out:
71682+ if (ptl != ptl_m)
71683+ spin_unlock(ptl_m);
bc901d79 71684+ pte_unmap(pte_m);
58c5fc13
MT
71685+ unlock_page(page_m);
71686+}
71687+
71688+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71689+{
71690+ struct mm_struct *mm = vma->vm_mm;
71691+ unsigned long address_m;
71692+ spinlock_t *ptl_m;
71693+ struct vm_area_struct *vma_m;
71694+ pmd_t *pmd_m;
71695+ pte_t *pte_m, entry_m;
71696+
71697+ BUG_ON(!page_m || PageAnon(page_m));
71698+
71699+ vma_m = pax_find_mirror_vma(vma);
71700+ if (!vma_m)
71701+ return;
71702+
71703+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71704+ address_m = address + SEGMEXEC_TASK_SIZE;
71705+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 71706+ pte_m = pte_offset_map(pmd_m, address_m);
58c5fc13
MT
71707+ ptl_m = pte_lockptr(mm, pmd_m);
71708+ if (ptl != ptl_m) {
71709+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71710+ if (!pte_none(*pte_m))
71711+ goto out;
71712+ }
71713+
71714+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71715+ page_cache_get(page_m);
71716+ page_add_file_rmap(page_m);
df50ba0c 71717+ inc_mm_counter_fast(mm, MM_FILEPAGES);
58c5fc13
MT
71718+ set_pte_at(mm, address_m, pte_m, entry_m);
71719+ update_mmu_cache(vma_m, address_m, entry_m);
71720+out:
71721+ if (ptl != ptl_m)
71722+ spin_unlock(ptl_m);
bc901d79 71723+ pte_unmap(pte_m);
58c5fc13
MT
71724+}
71725+
71726+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71727+{
71728+ struct mm_struct *mm = vma->vm_mm;
71729+ unsigned long address_m;
71730+ spinlock_t *ptl_m;
71731+ struct vm_area_struct *vma_m;
71732+ pmd_t *pmd_m;
71733+ pte_t *pte_m, entry_m;
71734+
71735+ vma_m = pax_find_mirror_vma(vma);
71736+ if (!vma_m)
71737+ return;
71738+
71739+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71740+ address_m = address + SEGMEXEC_TASK_SIZE;
71741+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 71742+ pte_m = pte_offset_map(pmd_m, address_m);
58c5fc13
MT
71743+ ptl_m = pte_lockptr(mm, pmd_m);
71744+ if (ptl != ptl_m) {
71745+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71746+ if (!pte_none(*pte_m))
71747+ goto out;
71748+ }
71749+
71750+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
71751+ set_pte_at(mm, address_m, pte_m, entry_m);
71752+out:
71753+ if (ptl != ptl_m)
71754+ spin_unlock(ptl_m);
bc901d79 71755+ pte_unmap(pte_m);
58c5fc13
MT
71756+}
71757+
71758+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
71759+{
71760+ struct page *page_m;
71761+ pte_t entry;
71762+
71763+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
71764+ goto out;
71765+
71766+ entry = *pte;
71767+ page_m = vm_normal_page(vma, address, entry);
71768+ if (!page_m)
71769+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
71770+ else if (PageAnon(page_m)) {
71771+ if (pax_find_mirror_vma(vma)) {
71772+ pte_unmap_unlock(pte, ptl);
71773+ lock_page(page_m);
71774+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
71775+ if (pte_same(entry, *pte))
71776+ pax_mirror_anon_pte(vma, address, page_m, ptl);
71777+ else
71778+ unlock_page(page_m);
71779+ }
71780+ } else
71781+ pax_mirror_file_pte(vma, address, page_m, ptl);
71782+
71783+out:
71784+ pte_unmap_unlock(pte, ptl);
71785+}
71786+#endif
71787+
71788 /*
71789 * This routine handles present pages, when users try to write
71790 * to a shared page. It is done by copying the page to a new address
5e856224 71791@@ -2683,6 +2880,12 @@ gotten:
58c5fc13
MT
71792 */
71793 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71794 if (likely(pte_same(*page_table, orig_pte))) {
71795+
71796+#ifdef CONFIG_PAX_SEGMEXEC
71797+ if (pax_find_mirror_vma(vma))
71798+ BUG_ON(!trylock_page(new_page));
71799+#endif
71800+
71801 if (old_page) {
71802 if (!PageAnon(old_page)) {
df50ba0c 71803 dec_mm_counter_fast(mm, MM_FILEPAGES);
5e856224 71804@@ -2734,6 +2937,10 @@ gotten:
58c5fc13
MT
71805 page_remove_rmap(old_page);
71806 }
71807
71808+#ifdef CONFIG_PAX_SEGMEXEC
71809+ pax_mirror_anon_pte(vma, address, new_page, ptl);
71810+#endif
71811+
71812 /* Free the old page.. */
71813 new_page = old_page;
71814 ret |= VM_FAULT_WRITE;
5e856224 71815@@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71816 swap_free(entry);
71817 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
71818 try_to_free_swap(page);
71819+
71820+#ifdef CONFIG_PAX_SEGMEXEC
71821+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
71822+#endif
71823+
71824 unlock_page(page);
bc901d79
MT
71825 if (swapcache) {
71826 /*
5e856224 71827@@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71828
71829 /* No need to invalidate - it was non-present before */
df50ba0c 71830 update_mmu_cache(vma, address, page_table);
58c5fc13
MT
71831+
71832+#ifdef CONFIG_PAX_SEGMEXEC
71833+ pax_mirror_anon_pte(vma, address, page, ptl);
71834+#endif
71835+
71836 unlock:
71837 pte_unmap_unlock(page_table, ptl);
71838 out:
5e856224 71839@@ -3055,40 +3272,6 @@ out_release:
57199397
MT
71840 }
71841
71842 /*
6892158b
MT
71843- * This is like a special single-page "expand_{down|up}wards()",
71844- * except we must first make sure that 'address{-|+}PAGE_SIZE'
57199397 71845- * doesn't hit another vma.
57199397
MT
71846- */
71847-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
71848-{
71849- address &= PAGE_MASK;
71850- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
71851- struct vm_area_struct *prev = vma->vm_prev;
71852-
71853- /*
71854- * Is there a mapping abutting this one below?
71855- *
71856- * That's only ok if it's the same stack mapping
71857- * that has gotten split..
71858- */
71859- if (prev && prev->vm_end == address)
71860- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71861-
15a11c5b 71862- expand_downwards(vma, address - PAGE_SIZE);
57199397 71863- }
6892158b
MT
71864- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71865- struct vm_area_struct *next = vma->vm_next;
71866-
71867- /* As VM_GROWSDOWN but s/below/above/ */
71868- if (next && next->vm_start == address + PAGE_SIZE)
71869- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71870-
71871- expand_upwards(vma, address + PAGE_SIZE);
71872- }
57199397
MT
71873- return 0;
71874-}
71875-
71876-/*
71877 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71878 * but allow concurrent faults), and pte mapped but not yet locked.
71879 * We return with mmap_sem still held, but pte unmapped and unlocked.
5e856224 71880@@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
ae4e228f
MT
71881 unsigned long address, pte_t *page_table, pmd_t *pmd,
71882 unsigned int flags)
71883 {
71884- struct page *page;
71885+ struct page *page = NULL;
71886 spinlock_t *ptl;
71887 pte_t entry;
71888
57199397
MT
71889- pte_unmap(page_table);
71890-
71891- /* Check if we need to add a guard page to the stack */
71892- if (check_stack_guard_page(vma, address) < 0)
71893- return VM_FAULT_SIGBUS;
71894-
71895- /* Use the zero-page for reads */
71896 if (!(flags & FAULT_FLAG_WRITE)) {
71897 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71898 vma->vm_page_prot));
71899- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71900+ ptl = pte_lockptr(mm, pmd);
71901+ spin_lock(ptl);
71902 if (!pte_none(*page_table))
71903 goto unlock;
71904 goto setpte;
71905 }
71906
71907 /* Allocate our own private page. */
71908+ pte_unmap(page_table);
71909+
71910 if (unlikely(anon_vma_prepare(vma)))
71911 goto oom;
71912 page = alloc_zeroed_user_highpage_movable(vma, address);
5e856224 71913@@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71914 if (!pte_none(*page_table))
71915 goto release;
ae4e228f 71916
58c5fc13
MT
71917+#ifdef CONFIG_PAX_SEGMEXEC
71918+ if (pax_find_mirror_vma(vma))
71919+ BUG_ON(!trylock_page(page));
71920+#endif
71921+
df50ba0c 71922 inc_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 71923 page_add_new_anon_rmap(page, vma, address);
ae4e228f 71924 setpte:
5e856224 71925@@ -3143,6 +3327,12 @@ setpte:
58c5fc13
MT
71926
71927 /* No need to invalidate - it was non-present before */
df50ba0c 71928 update_mmu_cache(vma, address, page_table);
58c5fc13
MT
71929+
71930+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f
MT
71931+ if (page)
71932+ pax_mirror_anon_pte(vma, address, page, ptl);
58c5fc13
MT
71933+#endif
71934+
71935 unlock:
71936 pte_unmap_unlock(page_table, ptl);
71937 return 0;
5e856224 71938@@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71939 */
71940 /* Only go through if we didn't race with anybody else... */
71941 if (likely(pte_same(*page_table, orig_pte))) {
71942+
71943+#ifdef CONFIG_PAX_SEGMEXEC
71944+ if (anon && pax_find_mirror_vma(vma))
71945+ BUG_ON(!trylock_page(page));
71946+#endif
71947+
71948 flush_icache_page(vma, page);
71949 entry = mk_pte(page, vma->vm_page_prot);
71950 if (flags & FAULT_FLAG_WRITE)
5e856224 71951@@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
71952
71953 /* no need to invalidate: a not-present page won't be cached */
df50ba0c 71954 update_mmu_cache(vma, address, page_table);
71955+
71956+#ifdef CONFIG_PAX_SEGMEXEC
71957+ if (anon)
71958+ pax_mirror_anon_pte(vma, address, page, ptl);
71959+ else
71960+ pax_mirror_file_pte(vma, address, page, ptl);
71961+#endif
71962+
71963 } else {
71964 if (cow_page)
71965 mem_cgroup_uncharge_page(cow_page);
5e856224 71966@@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm,
58c5fc13 71967 if (flags & FAULT_FLAG_WRITE)
bc901d79 71968 flush_tlb_fix_spurious_fault(vma, address);
71969 }
71970+
71971+#ifdef CONFIG_PAX_SEGMEXEC
71972+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71973+ return 0;
71974+#endif
71975+
71976 unlock:
71977 pte_unmap_unlock(pte, ptl);
71978 return 0;
5e856224 71979@@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71980 pmd_t *pmd;
71981 pte_t *pte;
71982
71983+#ifdef CONFIG_PAX_SEGMEXEC
71984+ struct vm_area_struct *vma_m;
71985+#endif
71986+
71987 __set_current_state(TASK_RUNNING);
71988
71989 count_vm_event(PGFAULT);
5e856224 71990@@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71991 if (unlikely(is_vm_hugetlb_page(vma)))
71992 return hugetlb_fault(mm, vma, address, flags);
71993
71994+#ifdef CONFIG_PAX_SEGMEXEC
71995+ vma_m = pax_find_mirror_vma(vma);
71996+ if (vma_m) {
71997+ unsigned long address_m;
71998+ pgd_t *pgd_m;
71999+ pud_t *pud_m;
72000+ pmd_t *pmd_m;
72001+
72002+ if (vma->vm_start > vma_m->vm_start) {
72003+ address_m = address;
72004+ address -= SEGMEXEC_TASK_SIZE;
72005+ vma = vma_m;
72006+ } else
72007+ address_m = address + SEGMEXEC_TASK_SIZE;
72008+
72009+ pgd_m = pgd_offset(mm, address_m);
72010+ pud_m = pud_alloc(mm, pgd_m, address_m);
72011+ if (!pud_m)
72012+ return VM_FAULT_OOM;
72013+ pmd_m = pmd_alloc(mm, pud_m, address_m);
72014+ if (!pmd_m)
72015+ return VM_FAULT_OOM;
16454cff 72016+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
72017+ return VM_FAULT_OOM;
72018+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
72019+ }
72020+#endif
72021+
72022 pgd = pgd_offset(mm, address);
72023 pud = pud_alloc(mm, pgd, address);
72024 if (!pud)
5e856224 72025@@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72026 * run pte_offset_map on the pmd, if an huge pmd could
72027 * materialize from under us from a different thread.
72028 */
72029- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
72030+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
72031 return VM_FAULT_OOM;
72032 /* if an huge pmd materialized from under us just retry later */
72033 if (unlikely(pmd_trans_huge(*pmd)))
72034@@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72035 spin_unlock(&mm->page_table_lock);
72036 return 0;
72037 }
72038+
72039+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72040+{
72041+ pud_t *new = pud_alloc_one(mm, address);
72042+ if (!new)
72043+ return -ENOMEM;
72044+
72045+ smp_wmb(); /* See comment in __pte_alloc */
72046+
72047+ spin_lock(&mm->page_table_lock);
72048+ if (pgd_present(*pgd)) /* Another has populated it */
72049+ pud_free(mm, new);
72050+ else
72051+ pgd_populate_kernel(mm, pgd, new);
72052+ spin_unlock(&mm->page_table_lock);
72053+ return 0;
72054+}
72055 #endif /* __PAGETABLE_PUD_FOLDED */
72056
72057 #ifndef __PAGETABLE_PMD_FOLDED
72058@@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
72059 spin_unlock(&mm->page_table_lock);
72060 return 0;
72061 }
72062+
72063+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
72064+{
72065+ pmd_t *new = pmd_alloc_one(mm, address);
72066+ if (!new)
72067+ return -ENOMEM;
72068+
72069+ smp_wmb(); /* See comment in __pte_alloc */
72070+
72071+ spin_lock(&mm->page_table_lock);
72072+#ifndef __ARCH_HAS_4LEVEL_HACK
72073+ if (pud_present(*pud)) /* Another has populated it */
72074+ pmd_free(mm, new);
72075+ else
72076+ pud_populate_kernel(mm, pud, new);
72077+#else
72078+ if (pgd_present(*pud)) /* Another has populated it */
72079+ pmd_free(mm, new);
72080+ else
72081+ pgd_populate_kernel(mm, pud, new);
72082+#endif /* __ARCH_HAS_4LEVEL_HACK */
72083+ spin_unlock(&mm->page_table_lock);
72084+ return 0;
72085+}
72086 #endif /* __PAGETABLE_PMD_FOLDED */
72087
72088 int make_pages_present(unsigned long addr, unsigned long end)
72089@@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void)
72090 gate_vma.vm_start = FIXADDR_USER_START;
72091 gate_vma.vm_end = FIXADDR_USER_END;
72092 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72093- gate_vma.vm_page_prot = __P101;
72094+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72095 /*
72096 * Make sure the vDSO gets into every core dump.
72097 * Dumping its contents makes post-mortem fully interpretable later
fe2de317 72098diff --git a/mm/mempolicy.c b/mm/mempolicy.c
5e856224 72099index 0a37570..2048346 100644
72100--- a/mm/mempolicy.c
72101+++ b/mm/mempolicy.c
4c928ab7 72102@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72103 unsigned long vmstart;
72104 unsigned long vmend;
72105
72106+#ifdef CONFIG_PAX_SEGMEXEC
72107+ struct vm_area_struct *vma_m;
72108+#endif
72109+
5e856224 72110 vma = find_vma(mm, start);
72111 if (!vma || vma->vm_start > start)
72112 return -EFAULT;
5e856224 72113@@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
df50ba0c 72114 err = policy_vma(vma, new_pol);
58c5fc13 72115 if (err)
df50ba0c 72116 goto out;
72117+
72118+#ifdef CONFIG_PAX_SEGMEXEC
72119+ vma_m = pax_find_mirror_vma(vma);
72120+ if (vma_m) {
df50ba0c 72121+ err = policy_vma(vma_m, new_pol);
58c5fc13 72122+ if (err)
df50ba0c 72123+ goto out;
72124+ }
72125+#endif
72126+
72127 }
72128
72129 out:
5e856224 72130@@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
72131
72132 if (end < start)
72133 return -EINVAL;
72134+
72135+#ifdef CONFIG_PAX_SEGMEXEC
72136+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72137+ if (end > SEGMEXEC_TASK_SIZE)
72138+ return -EINVAL;
72139+ } else
72140+#endif
72141+
72142+ if (end > TASK_SIZE)
72143+ return -EINVAL;
72144+
72145 if (end == start)
72146 return 0;
72147
5e856224 72148@@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
58c5fc13 72149 if (!mm)
6892158b 72150 goto out;
72151
72152+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72153+ if (mm != current->mm &&
72154+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72155+ err = -EPERM;
72156+ goto out;
72157+ }
72158+#endif
72159+
72160 /*
72161 * Check if this process has the right to modify the specified
72162 * process. The right exists if the process has administrative
5e856224 72163@@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72164 rcu_read_lock();
72165 tcred = __task_cred(task);
72166 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72167- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72168- !capable(CAP_SYS_NICE)) {
72169+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72170 rcu_read_unlock();
72171 err = -EPERM;
72172 goto out;
fe2de317 72173diff --git a/mm/migrate.c b/mm/migrate.c
5e856224 72174index 1503b6b..156c672 100644
72175--- a/mm/migrate.c
72176+++ b/mm/migrate.c
5e856224 72177@@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
72178 if (!mm)
72179 return -EINVAL;
72180
72181+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72182+ if (mm != current->mm &&
72183+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72184+ err = -EPERM;
72185+ goto out;
72186+ }
72187+#endif
72188+
72189 /*
72190 * Check if this process has the right to modify the specified
72191 * process. The right exists if the process has administrative
5e856224 72192@@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
72193 rcu_read_lock();
72194 tcred = __task_cred(task);
72195 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72196- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72197- !capable(CAP_SYS_NICE)) {
72198+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72199 rcu_read_unlock();
72200 err = -EPERM;
72201 goto out;
fe2de317 72202diff --git a/mm/mlock.c b/mm/mlock.c
5e856224 72203index ef726e8..13e0901 100644
72204--- a/mm/mlock.c
72205+++ b/mm/mlock.c
72206@@ -13,6 +13,7 @@
72207 #include <linux/pagemap.h>
72208 #include <linux/mempolicy.h>
72209 #include <linux/syscalls.h>
72210+#include <linux/security.h>
72211 #include <linux/sched.h>
4c928ab7 72212 #include <linux/export.h>
58c5fc13 72213 #include <linux/rmap.h>
4c928ab7 72214@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
72215 return -EINVAL;
72216 if (end == start)
72217 return 0;
72218+ if (end > TASK_SIZE)
72219+ return -EINVAL;
72220+
5e856224 72221 vma = find_vma(current->mm, start);
72222 if (!vma || vma->vm_start > start)
72223 return -ENOMEM;
5e856224 72224@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
57199397 72225 for (nstart = start ; ; ) {
15a11c5b 72226 vm_flags_t newflags;
72227
72228+#ifdef CONFIG_PAX_SEGMEXEC
72229+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72230+ break;
72231+#endif
72232+
72233 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72234
72235 newflags = vma->vm_flags | VM_LOCKED;
5e856224 72236@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
72237 lock_limit >>= PAGE_SHIFT;
72238
72239 /* check against resource limits */
72240+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72241 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72242 error = do_mlock(start, len, 1);
72243 up_write(&current->mm->mmap_sem);
5e856224 72244@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
72245 static int do_mlockall(int flags)
72246 {
72247 struct vm_area_struct * vma, * prev = NULL;
72248- unsigned int def_flags = 0;
72249
72250 if (flags & MCL_FUTURE)
72251- def_flags = VM_LOCKED;
72252- current->mm->def_flags = def_flags;
72253+ current->mm->def_flags |= VM_LOCKED;
72254+ else
72255+ current->mm->def_flags &= ~VM_LOCKED;
72256 if (flags == MCL_FUTURE)
72257 goto out;
58c5fc13 72258
57199397 72259 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72260 vm_flags_t newflags;
72261
72262+#ifdef CONFIG_PAX_SEGMEXEC
72263+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72264+ break;
72265+#endif
15a11c5b 72266+
72267+ BUG_ON(vma->vm_end > TASK_SIZE);
72268 newflags = vma->vm_flags | VM_LOCKED;
72269 if (!(flags & MCL_CURRENT))
72270 newflags &= ~VM_LOCKED;
5e856224 72271@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72272 lock_limit >>= PAGE_SHIFT;
72273
72274 ret = -ENOMEM;
57199397 72275+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72276 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72277 capable(CAP_IPC_LOCK))
72278 ret = do_mlockall(flags);
fe2de317 72279diff --git a/mm/mmap.c b/mm/mmap.c
5e856224 72280index da15a79..314aef3 100644
72281--- a/mm/mmap.c
72282+++ b/mm/mmap.c
16454cff 72283@@ -46,6 +46,16 @@
72284 #define arch_rebalance_pgtables(addr, len) (addr)
72285 #endif
72286
72287+static inline void verify_mm_writelocked(struct mm_struct *mm)
72288+{
72289+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72290+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72291+ up_read(&mm->mmap_sem);
72292+ BUG();
72293+ }
72294+#endif
72295+}
72296+
72297 static void unmap_region(struct mm_struct *mm,
72298 struct vm_area_struct *vma, struct vm_area_struct *prev,
72299 unsigned long start, unsigned long end);
fe2de317 72300@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
72301 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72302 *
72303 */
72304-pgprot_t protection_map[16] = {
72305+pgprot_t protection_map[16] __read_only = {
72306 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72307 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72308 };
72309
72310-pgprot_t vm_get_page_prot(unsigned long vm_flags)
72311+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72312 {
72313- return __pgprot(pgprot_val(protection_map[vm_flags &
72314+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72315 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72316 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72317+
72318+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
ae4e228f 72319+ if (!(__supported_pte_mask & _PAGE_NX) &&
72320+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72321+ (vm_flags & (VM_READ | VM_WRITE)))
72322+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72323+#endif
72324+
72325+ return prot;
72326 }
72327 EXPORT_SYMBOL(vm_get_page_prot);
72328
72329 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72330 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72331 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72332+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
57199397 72333 /*
72334 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72335 * other variables. It can be updated by several CPUs frequently.
fe2de317 72336@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72337 struct vm_area_struct *next = vma->vm_next;
72338
72339 might_sleep();
72340+ BUG_ON(vma->vm_mirror);
72341 if (vma->vm_ops && vma->vm_ops->close)
72342 vma->vm_ops->close(vma);
72343 if (vma->vm_file) {
6e9df6a3 72344@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72345 * not page aligned -Ram Gupta
72346 */
df50ba0c 72347 rlim = rlimit(RLIMIT_DATA);
72348+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72349 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72350 (mm->end_data - mm->start_data) > rlim)
72351 goto out;
6e9df6a3 72352@@ -689,6 +711,12 @@ static int
72353 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72354 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72355 {
72356+
72357+#ifdef CONFIG_PAX_SEGMEXEC
72358+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72359+ return 0;
72360+#endif
72361+
72362 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 72363 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 72364 if (vma->vm_pgoff == vm_pgoff)
6e9df6a3 72365@@ -708,6 +736,12 @@ static int
72366 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72367 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72368 {
72369+
72370+#ifdef CONFIG_PAX_SEGMEXEC
72371+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72372+ return 0;
72373+#endif
72374+
72375 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 72376 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 72377 pgoff_t vm_pglen;
fe2de317 72378@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72379 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72380 struct vm_area_struct *prev, unsigned long addr,
72381 unsigned long end, unsigned long vm_flags,
72382- struct anon_vma *anon_vma, struct file *file,
72383+ struct anon_vma *anon_vma, struct file *file,
72384 pgoff_t pgoff, struct mempolicy *policy)
72385 {
72386 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72387 struct vm_area_struct *area, *next;
df50ba0c 72388 int err;
72389
72390+#ifdef CONFIG_PAX_SEGMEXEC
72391+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72392+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72393+
72394+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72395+#endif
72396+
72397 /*
72398 * We later require that vma->vm_flags == vm_flags,
72399 * so this tests vma->vm_flags & VM_SPECIAL, too.
fe2de317 72400@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72401 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72402 next = next->vm_next;
72403
72404+#ifdef CONFIG_PAX_SEGMEXEC
72405+ if (prev)
72406+ prev_m = pax_find_mirror_vma(prev);
72407+ if (area)
72408+ area_m = pax_find_mirror_vma(area);
72409+ if (next)
72410+ next_m = pax_find_mirror_vma(next);
72411+#endif
72412+
72413 /*
72414 * Can it merge with the predecessor?
72415 */
fe2de317 72416@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
58c5fc13 72417 /* cases 1, 6 */
df50ba0c 72418 err = vma_adjust(prev, prev->vm_start,
72419 next->vm_end, prev->vm_pgoff, NULL);
72420- } else /* cases 2, 5, 7 */
72421+
72422+#ifdef CONFIG_PAX_SEGMEXEC
72423+ if (!err && prev_m)
72424+ err = vma_adjust(prev_m, prev_m->vm_start,
72425+ next_m->vm_end, prev_m->vm_pgoff, NULL);
72426+#endif
72427+
72428+ } else { /* cases 2, 5, 7 */
df50ba0c 72429 err = vma_adjust(prev, prev->vm_start,
72430 end, prev->vm_pgoff, NULL);
72431+
72432+#ifdef CONFIG_PAX_SEGMEXEC
72433+ if (!err && prev_m)
72434+ err = vma_adjust(prev_m, prev_m->vm_start,
72435+ end_m, prev_m->vm_pgoff, NULL);
72436+#endif
72437+
72438+ }
72439 if (err)
72440 return NULL;
16454cff 72441 khugepaged_enter_vma_merge(prev);
fe2de317 72442@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72443 mpol_equal(policy, vma_policy(next)) &&
72444 can_vma_merge_before(next, vm_flags,
72445 anon_vma, file, pgoff+pglen)) {
72446- if (prev && addr < prev->vm_end) /* case 4 */
72447+ if (prev && addr < prev->vm_end) { /* case 4 */
df50ba0c 72448 err = vma_adjust(prev, prev->vm_start,
72449 addr, prev->vm_pgoff, NULL);
72450- else /* cases 3, 8 */
72451+
72452+#ifdef CONFIG_PAX_SEGMEXEC
72453+ if (!err && prev_m)
72454+ err = vma_adjust(prev_m, prev_m->vm_start,
72455+ addr_m, prev_m->vm_pgoff, NULL);
72456+#endif
72457+
72458+ } else { /* cases 3, 8 */
df50ba0c 72459 err = vma_adjust(area, addr, next->vm_end,
72460 next->vm_pgoff - pglen, NULL);
72461+
72462+#ifdef CONFIG_PAX_SEGMEXEC
72463+ if (!err && area_m)
72464+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
72465+ next_m->vm_pgoff - pglen, NULL);
72466+#endif
72467+
72468+ }
72469 if (err)
72470 return NULL;
16454cff 72471 khugepaged_enter_vma_merge(area);
6e9df6a3 72472@@ -921,14 +1001,11 @@ none:
72473 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72474 struct file *file, long pages)
72475 {
72476- const unsigned long stack_flags
72477- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72478-
72479 if (file) {
72480 mm->shared_vm += pages;
72481 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72482 mm->exec_vm += pages;
72483- } else if (flags & stack_flags)
72484+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72485 mm->stack_vm += pages;
72486 if (flags & (VM_RESERVED|VM_IO))
72487 mm->reserved_vm += pages;
fe2de317 72488@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72489 * (the exception is when the underlying filesystem is noexec
72490 * mounted, in which case we dont add PROT_EXEC.)
72491 */
72492- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72493+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72494 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72495 prot |= PROT_EXEC;
72496
fe2de317 72497@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72498 /* Obtain the address to map to. we verify (or select) it and ensure
72499 * that it represents a valid section of the address space.
72500 */
72501- addr = get_unmapped_area(file, addr, len, pgoff, flags);
72502+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72503 if (addr & ~PAGE_MASK)
72504 return addr;
72505
fe2de317 72506@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72507 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72508 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72509
58c5fc13 72510+#ifdef CONFIG_PAX_MPROTECT
57199397 72511+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 72512+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72513+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72514+ gr_log_rwxmmap(file);
72515+
72516+#ifdef CONFIG_PAX_EMUPLT
72517+ vm_flags &= ~VM_EXEC;
72518+#else
72519+ return -EPERM;
72520+#endif
72521+
72522+ }
72523+
72524+ if (!(vm_flags & VM_EXEC))
72525+ vm_flags &= ~VM_MAYEXEC;
72526+#else
72527+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72528+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72529+#endif
72530+ else
72531+ vm_flags &= ~VM_MAYWRITE;
72532+ }
72533+#endif
72534+
72535+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72536+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72537+ vm_flags &= ~VM_PAGEEXEC;
72538+#endif
72539+
ae4e228f 72540 if (flags & MAP_LOCKED)
72541 if (!can_do_mlock())
72542 return -EPERM;
fe2de317 72543@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
58c5fc13 72544 locked += mm->locked_vm;
df50ba0c 72545 lock_limit = rlimit(RLIMIT_MEMLOCK);
72546 lock_limit >>= PAGE_SHIFT;
72547+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72548 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72549 return -EAGAIN;
72550 }
fe2de317 72551@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72552 if (error)
72553 return error;
72554
72555+ if (!gr_acl_handle_mmap(file, prot))
72556+ return -EACCES;
72557+
72558 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72559 }
72560 EXPORT_SYMBOL(do_mmap_pgoff);
fe2de317 72561@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
15a11c5b 72562 vm_flags_t vm_flags = vma->vm_flags;
72563
72564 /* If it was private or non-writable, the write bit is already clear */
72565- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72566+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72567 return 0;
72568
72569 /* The backer wishes to know when pages are first written to? */
fe2de317 72570@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
72571 unsigned long charged = 0;
72572 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72573
72574+#ifdef CONFIG_PAX_SEGMEXEC
72575+ struct vm_area_struct *vma_m = NULL;
72576+#endif
72577+
72578+ /*
72579+ * mm->mmap_sem is required to protect against another thread
72580+ * changing the mappings in case we sleep.
72581+ */
72582+ verify_mm_writelocked(mm);
72583+
72584 /* Clear old maps */
72585 error = -ENOMEM;
72586-munmap_back:
72587 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72588 if (vma && vma->vm_start < addr + len) {
72589 if (do_munmap(mm, addr, len))
72590 return -ENOMEM;
72591- goto munmap_back;
72592+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72593+ BUG_ON(vma && vma->vm_start < addr + len);
72594 }
72595
72596 /* Check against address space limit. */
6e9df6a3 72597@@ -1258,6 +1379,16 @@ munmap_back:
72598 goto unacct_error;
72599 }
72600
72601+#ifdef CONFIG_PAX_SEGMEXEC
72602+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72603+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72604+ if (!vma_m) {
72605+ error = -ENOMEM;
72606+ goto free_vma;
72607+ }
72608+ }
72609+#endif
72610+
72611 vma->vm_mm = mm;
72612 vma->vm_start = addr;
72613 vma->vm_end = addr + len;
5e856224 72614@@ -1282,6 +1413,19 @@ munmap_back:
72615 error = file->f_op->mmap(file, vma);
72616 if (error)
72617 goto unmap_and_free_vma;
72618+
72619+#ifdef CONFIG_PAX_SEGMEXEC
72620+ if (vma_m && (vm_flags & VM_EXECUTABLE))
72621+ added_exe_file_vma(mm);
72622+#endif
72623+
72624+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72625+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72626+ vma->vm_flags |= VM_PAGEEXEC;
72627+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72628+ }
72629+#endif
72630+
72631 if (vm_flags & VM_EXECUTABLE)
72632 added_exe_file_vma(mm);
ae4e228f 72633
5e856224 72634@@ -1319,6 +1463,11 @@ munmap_back:
72635 vma_link(mm, vma, prev, rb_link, rb_parent);
72636 file = vma->vm_file;
72637
72638+#ifdef CONFIG_PAX_SEGMEXEC
72639+ if (vma_m)
df50ba0c 72640+ BUG_ON(pax_mirror_vma(vma_m, vma));
72641+#endif
72642+
72643 /* Once vma denies write, undo our temporary denial count */
72644 if (correct_wcount)
72645 atomic_inc(&inode->i_writecount);
5e856224 72646@@ -1327,6 +1476,7 @@ out:
72647
72648 mm->total_vm += len >> PAGE_SHIFT;
72649 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72650+ track_exec_limit(mm, addr, addr + len, vm_flags);
72651 if (vm_flags & VM_LOCKED) {
72652 if (!mlock_vma_pages_range(vma, addr, addr + len))
72653 mm->locked_vm += (len >> PAGE_SHIFT);
5e856224 72654@@ -1344,6 +1494,12 @@ unmap_and_free_vma:
72655 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72656 charged = 0;
72657 free_vma:
72658+
72659+#ifdef CONFIG_PAX_SEGMEXEC
72660+ if (vma_m)
72661+ kmem_cache_free(vm_area_cachep, vma_m);
72662+#endif
72663+
72664 kmem_cache_free(vm_area_cachep, vma);
72665 unacct_error:
72666 if (charged)
5e856224 72667@@ -1351,6 +1507,44 @@ unacct_error:
72668 return error;
72669 }
72670
16454cff 72671+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72672+{
72673+ if (!vma) {
72674+#ifdef CONFIG_STACK_GROWSUP
72675+ if (addr > sysctl_heap_stack_gap)
72676+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72677+ else
72678+ vma = find_vma(current->mm, 0);
72679+ if (vma && (vma->vm_flags & VM_GROWSUP))
72680+ return false;
72681+#endif
72682+ return true;
72683+ }
72684+
72685+ if (addr + len > vma->vm_start)
72686+ return false;
72687+
72688+ if (vma->vm_flags & VM_GROWSDOWN)
72689+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72690+#ifdef CONFIG_STACK_GROWSUP
72691+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72692+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72693+#endif
72694+
72695+ return true;
72696+}
72697+
72698+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72699+{
72700+ if (vma->vm_start < len)
72701+ return -ENOMEM;
72702+ if (!(vma->vm_flags & VM_GROWSDOWN))
72703+ return vma->vm_start - len;
72704+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
72705+ return vma->vm_start - len - sysctl_heap_stack_gap;
72706+ return -ENOMEM;
72707+}
72708+
72709 /* Get an address range which is currently unmapped.
72710 * For shmat() with addr=0.
72711 *
5e856224 72712@@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
72713 if (flags & MAP_FIXED)
72714 return addr;
72715
72716+#ifdef CONFIG_PAX_RANDMMAP
72717+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72718+#endif
72719+
72720 if (addr) {
72721 addr = PAGE_ALIGN(addr);
72722- vma = find_vma(mm, addr);
72723- if (TASK_SIZE - len >= addr &&
72724- (!vma || addr + len <= vma->vm_start))
72725- return addr;
72726+ if (TASK_SIZE - len >= addr) {
72727+ vma = find_vma(mm, addr);
72728+ if (check_heap_stack_gap(vma, addr, len))
72729+ return addr;
72730+ }
72731 }
72732 if (len > mm->cached_hole_size) {
72733- start_addr = addr = mm->free_area_cache;
72734+ start_addr = addr = mm->free_area_cache;
72735 } else {
72736- start_addr = addr = TASK_UNMAPPED_BASE;
72737- mm->cached_hole_size = 0;
72738+ start_addr = addr = mm->mmap_base;
72739+ mm->cached_hole_size = 0;
72740 }
72741
72742 full_search:
5e856224 72743@@ -1399,34 +1598,40 @@ full_search:
72744 * Start a new search - just in case we missed
72745 * some holes.
72746 */
72747- if (start_addr != TASK_UNMAPPED_BASE) {
72748- addr = TASK_UNMAPPED_BASE;
72749- start_addr = addr;
72750+ if (start_addr != mm->mmap_base) {
72751+ start_addr = addr = mm->mmap_base;
72752 mm->cached_hole_size = 0;
72753 goto full_search;
72754 }
72755 return -ENOMEM;
72756 }
72757- if (!vma || addr + len <= vma->vm_start) {
72758- /*
72759- * Remember the place where we stopped the search:
72760- */
72761- mm->free_area_cache = addr + len;
72762- return addr;
72763- }
72764+ if (check_heap_stack_gap(vma, addr, len))
72765+ break;
72766 if (addr + mm->cached_hole_size < vma->vm_start)
72767 mm->cached_hole_size = vma->vm_start - addr;
72768 addr = vma->vm_end;
72769 }
72770+
72771+ /*
72772+ * Remember the place where we stopped the search:
72773+ */
72774+ mm->free_area_cache = addr + len;
72775+ return addr;
72776 }
72777 #endif
72778
72779 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
72780 {
72781+
72782+#ifdef CONFIG_PAX_SEGMEXEC
72783+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72784+ return;
72785+#endif
72786+
72787 /*
72788 * Is this a new hole at the lowest possible address?
72789 */
72790- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
72791+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
72792 mm->free_area_cache = addr;
72793 mm->cached_hole_size = ~0UL;
72794 }
5e856224 72795@@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72796 {
72797 struct vm_area_struct *vma;
72798 struct mm_struct *mm = current->mm;
72799- unsigned long addr = addr0;
72800+ unsigned long base = mm->mmap_base, addr = addr0;
72801
72802 /* requested length too big for entire address space */
72803 if (len > TASK_SIZE)
5e856224 72804@@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72805 if (flags & MAP_FIXED)
72806 return addr;
72807
72808+#ifdef CONFIG_PAX_RANDMMAP
72809+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72810+#endif
72811+
72812 /* requesting a specific address */
72813 if (addr) {
72814 addr = PAGE_ALIGN(addr);
72815- vma = find_vma(mm, addr);
72816- if (TASK_SIZE - len >= addr &&
72817- (!vma || addr + len <= vma->vm_start))
72818- return addr;
72819+ if (TASK_SIZE - len >= addr) {
72820+ vma = find_vma(mm, addr);
72821+ if (check_heap_stack_gap(vma, addr, len))
72822+ return addr;
72823+ }
72824 }
72825
72826 /* check if free_area_cache is useful for us */
5e856224 72827@@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72828 /* make sure it can fit in the remaining address space */
72829 if (addr > len) {
72830 vma = find_vma(mm, addr-len);
72831- if (!vma || addr <= vma->vm_start)
72832+ if (check_heap_stack_gap(vma, addr - len, len))
72833 /* remember the address as a hint for next time */
72834 return (mm->free_area_cache = addr-len);
72835 }
5e856224 72836@@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72837 * return with success:
72838 */
72839 vma = find_vma(mm, addr);
72840- if (!vma || addr+len <= vma->vm_start)
72841+ if (check_heap_stack_gap(vma, addr, len))
72842 /* remember the address as a hint for next time */
72843 return (mm->free_area_cache = addr);
72844
5e856224 72845@@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72846 mm->cached_hole_size = vma->vm_start - addr;
72847
72848 /* try just below the current vma->vm_start */
72849- addr = vma->vm_start-len;
72850- } while (len < vma->vm_start);
72851+ addr = skip_heap_stack_gap(vma, len);
72852+ } while (!IS_ERR_VALUE(addr));
72853
72854 bottomup:
72855 /*
5e856224 72856@@ -1510,13 +1720,21 @@ bottomup:
72857 * can happen with large stack limits and large mmap()
72858 * allocations.
72859 */
72860+ mm->mmap_base = TASK_UNMAPPED_BASE;
72861+
72862+#ifdef CONFIG_PAX_RANDMMAP
72863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72864+ mm->mmap_base += mm->delta_mmap;
72865+#endif
72866+
72867+ mm->free_area_cache = mm->mmap_base;
72868 mm->cached_hole_size = ~0UL;
72869- mm->free_area_cache = TASK_UNMAPPED_BASE;
72870 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
72871 /*
72872 * Restore the topdown base:
72873 */
72874- mm->free_area_cache = mm->mmap_base;
72875+ mm->mmap_base = base;
72876+ mm->free_area_cache = base;
72877 mm->cached_hole_size = ~0UL;
72878
72879 return addr;
5e856224 72880@@ -1525,6 +1743,12 @@ bottomup:
72881
72882 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72883 {
72884+
72885+#ifdef CONFIG_PAX_SEGMEXEC
72886+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72887+ return;
72888+#endif
72889+
72890 /*
72891 * Is this a new hole at the highest possible address?
72892 */
5e856224 72893@@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72894 mm->free_area_cache = addr;
72895
72896 /* dont allow allocations above current base */
72897- if (mm->free_area_cache > mm->mmap_base)
72898+ if (mm->free_area_cache > mm->mmap_base) {
72899 mm->free_area_cache = mm->mmap_base;
72900+ mm->cached_hole_size = ~0UL;
72901+ }
72902 }
72903
72904 unsigned long
72905@@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
72906 return vma;
72907 }
4c928ab7 72908
72909+#ifdef CONFIG_PAX_SEGMEXEC
72910+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72911+{
72912+ struct vm_area_struct *vma_m;
5e856224 72913+
72914+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72915+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72916+ BUG_ON(vma->vm_mirror);
72917+ return NULL;
72918+ }
72919+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72920+ vma_m = vma->vm_mirror;
72921+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72922+ BUG_ON(vma->vm_file != vma_m->vm_file);
72923+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
57199397 72924+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72925+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72926+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
58c5fc13 72927+ return vma_m;
5e856224 72928+}
58c5fc13 72929+#endif
5e856224 72930+
72931 /*
72932 * Verify that the stack growth is acceptable and
72933 * update accounting. This is shared with both the
72934@@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72935 return -ENOMEM;
72936
72937 /* Stack limit test */
72938+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
df50ba0c 72939 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72940 return -ENOMEM;
72941
5e856224 72942@@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
58c5fc13 72943 locked = mm->locked_vm + grow;
72944 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72945 limit >>= PAGE_SHIFT;
72946+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72947 if (locked > limit && !capable(CAP_IPC_LOCK))
72948 return -ENOMEM;
72949 }
5e856224 72950@@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72951 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72952 * vma is the last one with address > vma->vm_end. Have to extend vma.
72953 */
72954+#ifndef CONFIG_IA64
72955+static
72956+#endif
72957 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72958 {
72959 int error;
72960+ bool locknext;
58c5fc13
MT
72961
72962 if (!(vma->vm_flags & VM_GROWSUP))
72963 return -EFAULT;
72964
72965+ /* Also guard against wrapping around to address 0. */
72966+ if (address < PAGE_ALIGN(address+1))
72967+ address = PAGE_ALIGN(address+1);
72968+ else
72969+ return -ENOMEM;
72970+
72971 /*
72972 * We must make sure the anon_vma is allocated
72973 * so that the anon_vma locking is not a noop.
72974 */
72975 if (unlikely(anon_vma_prepare(vma)))
72976 return -ENOMEM;
72977+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
57199397 72978+ if (locknext && anon_vma_prepare(vma->vm_next))
58c5fc13 72979+ return -ENOMEM;
6892158b 72980 vma_lock_anon_vma(vma);
58c5fc13 72981+ if (locknext)
6892158b 72982+ vma_lock_anon_vma(vma->vm_next);
58c5fc13
MT
72983
72984 /*
72985 * vma->vm_start/vm_end cannot change under us because the caller
72986 * is required to hold the mmap_sem in read mode. We need the
72987- * anon_vma lock to serialize against concurrent expand_stacks.
72988- * Also guard against wrapping around to address 0.
72989+ * anon_vma locks to serialize against concurrent expand_stacks
72990+ * and expand_upwards.
72991 */
72992- if (address < PAGE_ALIGN(address+4))
72993- address = PAGE_ALIGN(address+4);
72994- else {
6892158b 72995- vma_unlock_anon_vma(vma);
72996- return -ENOMEM;
72997- }
72998 error = 0;
72999
73000 /* Somebody else might have raced and expanded it already */
73001- if (address > vma->vm_end) {
73002+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
73003+ error = -ENOMEM;
73004+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
58c5fc13
MT
73005 unsigned long size, grow;
73006
73007 size = address - vma->vm_start;
5e856224 73008@@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
66a7e928 73009 }
6892158b 73010 }
58c5fc13
MT
73011 }
73012+ if (locknext)
6892158b
MT
73013+ vma_unlock_anon_vma(vma->vm_next);
73014 vma_unlock_anon_vma(vma);
16454cff 73015 khugepaged_enter_vma_merge(vma);
58c5fc13 73016 return error;
5e856224 73017@@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
58c5fc13
MT
73018 unsigned long address)
73019 {
73020 int error;
73021+ bool lockprev = false;
57199397 73022+ struct vm_area_struct *prev;
73023
73024 /*
73025 * We must make sure the anon_vma is allocated
5e856224 73026@@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
73027 if (error)
73028 return error;
73029
57199397 73030+ prev = vma->vm_prev;
58c5fc13 73031+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
58c5fc13
MT
73032+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73033+#endif
57199397 73034+ if (lockprev && anon_vma_prepare(prev))
73035+ return -ENOMEM;
73036+ if (lockprev)
6892158b 73037+ vma_lock_anon_vma(prev);
58c5fc13 73038+
6892158b 73039 vma_lock_anon_vma(vma);
73040
73041 /*
5e856224 73042@@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
73043 */
73044
73045 /* Somebody else might have raced and expanded it already */
73046- if (address < vma->vm_start) {
73047+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73048+ error = -ENOMEM;
73049+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73050 unsigned long size, grow;
73051
73052+#ifdef CONFIG_PAX_SEGMEXEC
73053+ struct vm_area_struct *vma_m;
73054+
73055+ vma_m = pax_find_mirror_vma(vma);
73056+#endif
73057+
73058 size = vma->vm_end - address;
73059 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73060
5e856224 73061@@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
73062 if (!error) {
73063 vma->vm_start = address;
73064 vma->vm_pgoff -= grow;
73065+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73066+
73067+#ifdef CONFIG_PAX_SEGMEXEC
73068+ if (vma_m) {
73069+ vma_m->vm_start -= grow << PAGE_SHIFT;
73070+ vma_m->vm_pgoff -= grow;
73071+ }
73072+#endif
73073+
73074 perf_event_mmap(vma);
73075 }
73076 }
73077 }
6892158b 73078 vma_unlock_anon_vma(vma);
58c5fc13 73079+ if (lockprev)
6892158b 73080+ vma_unlock_anon_vma(prev);
16454cff 73081 khugepaged_enter_vma_merge(vma);
73082 return error;
73083 }
5e856224 73084@@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
73085 do {
73086 long nrpages = vma_pages(vma);
73087
73088+#ifdef CONFIG_PAX_SEGMEXEC
73089+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73090+ vma = remove_vma(vma);
73091+ continue;
73092+ }
73093+#endif
73094+
73095 mm->total_vm -= nrpages;
73096 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
73097 vma = remove_vma(vma);
5e856224 73098@@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13 73099 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
57199397 73100 vma->vm_prev = NULL;
73101 do {
73102+
73103+#ifdef CONFIG_PAX_SEGMEXEC
73104+ if (vma->vm_mirror) {
73105+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73106+ vma->vm_mirror->vm_mirror = NULL;
73107+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
73108+ vma->vm_mirror = NULL;
73109+ }
73110+#endif
73111+
73112 rb_erase(&vma->vm_rb, &mm->mm_rb);
73113 mm->map_count--;
73114 tail_vma = vma;
5e856224 73115@@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 73116 struct vm_area_struct *new;
df50ba0c 73117 int err = -ENOMEM;
ae4e228f 73118
58c5fc13 73119+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 73120+ struct vm_area_struct *vma_m, *new_m = NULL;
58c5fc13 73121+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
ae4e228f 73122+#endif
58c5fc13 73123+
73124 if (is_vm_hugetlb_page(vma) && (addr &
73125 ~(huge_page_mask(hstate_vma(vma)))))
73126 return -EINVAL;
73127
73128+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13 73129+ vma_m = pax_find_mirror_vma(vma);
ae4e228f 73130+#endif
58c5fc13 73131+
ae4e228f
MT
73132 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73133 if (!new)
df50ba0c 73134 goto out_err;
ae4e228f
MT
73135
73136+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13
MT
73137+ if (vma_m) {
73138+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73139+ if (!new_m) {
73140+ kmem_cache_free(vm_area_cachep, new);
df50ba0c 73141+ goto out_err;
58c5fc13
MT
73142+ }
73143+ }
ae4e228f 73144+#endif
58c5fc13 73145+
ae4e228f
MT
73146 /* most fields are the same, copy all, and then fixup */
73147 *new = *vma;
73148
5e856224 73149@@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f
MT
73150 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73151 }
73152
73153+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13
MT
73154+ if (vma_m) {
73155+ *new_m = *vma_m;
df50ba0c 73156+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
58c5fc13
MT
73157+ new_m->vm_mirror = new;
73158+ new->vm_mirror = new_m;
73159+
73160+ if (new_below)
73161+ new_m->vm_end = addr_m;
73162+ else {
73163+ new_m->vm_start = addr_m;
73164+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73165+ }
73166+ }
73167+#endif
73168+
73169 pol = mpol_dup(vma_policy(vma));
73170 if (IS_ERR(pol)) {
df50ba0c 73171 err = PTR_ERR(pol);
5e856224 73172@@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 73173 else
df50ba0c 73174 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73175
73176+#ifdef CONFIG_PAX_SEGMEXEC
73177+ if (!err && vma_m) {
73178+ if (anon_vma_clone(new_m, vma_m))
73179+ goto out_free_mpol;
73180+
73181+ mpol_get(pol);
73182+ vma_set_policy(new_m, pol);
73183+
73184+ if (new_m->vm_file) {
73185+ get_file(new_m->vm_file);
73186+ if (vma_m->vm_flags & VM_EXECUTABLE)
73187+ added_exe_file_vma(mm);
73188+ }
73189+
73190+ if (new_m->vm_ops && new_m->vm_ops->open)
73191+ new_m->vm_ops->open(new_m);
73192+
73193+ if (new_below)
df50ba0c 73194+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73195+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73196+ else
73197+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73198+
73199+ if (err) {
73200+ if (new_m->vm_ops && new_m->vm_ops->close)
73201+ new_m->vm_ops->close(new_m);
73202+ if (new_m->vm_file) {
73203+ if (vma_m->vm_flags & VM_EXECUTABLE)
73204+ removed_exe_file_vma(mm);
73205+ fput(new_m->vm_file);
73206+ }
73207+ mpol_put(pol);
73208+ }
58c5fc13 73209+ }
ae4e228f 73210+#endif
58c5fc13 73211+
73212 /* Success. */
73213 if (!err)
73214 return 0;
5e856224 73215@@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73216 removed_exe_file_vma(mm);
73217 fput(new->vm_file);
73218 }
73219- unlink_anon_vmas(new);
73220 out_free_mpol:
73221 mpol_put(pol);
73222 out_free_vma:
73223+
73224+#ifdef CONFIG_PAX_SEGMEXEC
73225+ if (new_m) {
73226+ unlink_anon_vmas(new_m);
73227+ kmem_cache_free(vm_area_cachep, new_m);
73228+ }
73229+#endif
73230+
73231+ unlink_anon_vmas(new);
73232 kmem_cache_free(vm_area_cachep, new);
73233 out_err:
73234 return err;
5e856224 73235@@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73236 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73237 unsigned long addr, int new_below)
73238 {
73239+
73240+#ifdef CONFIG_PAX_SEGMEXEC
73241+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73242+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73243+ if (mm->map_count >= sysctl_max_map_count-1)
73244+ return -ENOMEM;
73245+ } else
58c5fc13 73246+#endif
73247+
73248 if (mm->map_count >= sysctl_max_map_count)
73249 return -ENOMEM;
58c5fc13 73250
5e856224 73251@@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73252 * work. This now handles partial unmappings.
73253 * Jeremy Fitzhardinge <jeremy@goop.org>
73254 */
73255+#ifdef CONFIG_PAX_SEGMEXEC
73256 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73257 {
73258+ int ret = __do_munmap(mm, start, len);
73259+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73260+ return ret;
73261+
73262+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73263+}
73264+
73265+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73266+#else
15a11c5b 73267+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
58c5fc13 73268+#endif
15a11c5b 73269+{
73270 unsigned long end;
73271 struct vm_area_struct *vma, *prev, *last;
73272
73273+ /*
73274+ * mm->mmap_sem is required to protect against another thread
73275+ * changing the mappings in case we sleep.
73276+ */
73277+ verify_mm_writelocked(mm);
73278+
73279 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73280 return -EINVAL;
73281
5e856224 73282@@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73283 /* Fix up all other VM information */
73284 remove_vma_list(mm, vma);
73285
73286+ track_exec_limit(mm, start, end, 0UL);
73287+
73288 return 0;
73289 }
73290
5e856224 73291@@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73292
73293 profile_munmap(addr);
73294
73295+#ifdef CONFIG_PAX_SEGMEXEC
73296+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73297+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
73298+ return -EINVAL;
73299+#endif
73300+
73301 down_write(&mm->mmap_sem);
73302 ret = do_munmap(mm, addr, len);
73303 up_write(&mm->mmap_sem);
73304 return ret;
73305 }
73306
73307-static inline void verify_mm_writelocked(struct mm_struct *mm)
73308-{
73309-#ifdef CONFIG_DEBUG_VM
73310- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73311- WARN_ON(1);
73312- up_read(&mm->mmap_sem);
73313- }
73314-#endif
73315-}
73316-
73317 /*
73318 * this is really a simplified "do_mmap". it only handles
73319 * anonymous maps. eventually we may be able to do some
5e856224 73320@@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73321 struct rb_node ** rb_link, * rb_parent;
73322 pgoff_t pgoff = addr >> PAGE_SHIFT;
73323 int error;
73324+ unsigned long charged;
73325
73326 len = PAGE_ALIGN(len);
73327 if (!len)
5e856224 73328@@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73329
73330 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73331
73332+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73333+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73334+ flags &= ~VM_EXEC;
73335+
73336+#ifdef CONFIG_PAX_MPROTECT
73337+ if (mm->pax_flags & MF_PAX_MPROTECT)
73338+ flags &= ~VM_MAYEXEC;
73339+#endif
73340+
73341+ }
73342+#endif
73343+
73344 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73345 if (error & ~PAGE_MASK)
73346 return error;
73347
73348+ charged = len >> PAGE_SHIFT;
73349+
73350 /*
73351 * mlock MCL_FUTURE?
73352 */
73353 if (mm->def_flags & VM_LOCKED) {
73354 unsigned long locked, lock_limit;
73355- locked = len >> PAGE_SHIFT;
73356+ locked = charged;
73357 locked += mm->locked_vm;
df50ba0c 73358 lock_limit = rlimit(RLIMIT_MEMLOCK);
58c5fc13 73359 lock_limit >>= PAGE_SHIFT;
5e856224 73360@@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73361 /*
73362 * Clear old maps. this also does some error checking for us
73363 */
73364- munmap_back:
73365 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73366 if (vma && vma->vm_start < addr + len) {
73367 if (do_munmap(mm, addr, len))
73368 return -ENOMEM;
73369- goto munmap_back;
73370+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73371+ BUG_ON(vma && vma->vm_start < addr + len);
73372 }
73373
73374 /* Check against address space limits *after* clearing old maps... */
73375- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73376+ if (!may_expand_vm(mm, charged))
73377 return -ENOMEM;
73378
73379 if (mm->map_count > sysctl_max_map_count)
73380 return -ENOMEM;
73381
73382- if (security_vm_enough_memory(len >> PAGE_SHIFT))
73383+ if (security_vm_enough_memory(charged))
73384 return -ENOMEM;
73385
73386 /* Can we just expand an old private anonymous mapping? */
5e856224 73387@@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73388 */
73389 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73390 if (!vma) {
73391- vm_unacct_memory(len >> PAGE_SHIFT);
73392+ vm_unacct_memory(charged);
73393 return -ENOMEM;
73394 }
73395
5e856224 73396@@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73397 vma_link(mm, vma, prev, rb_link, rb_parent);
73398 out:
6892158b 73399 perf_event_mmap(vma);
73400- mm->total_vm += len >> PAGE_SHIFT;
73401+ mm->total_vm += charged;
73402 if (flags & VM_LOCKED) {
73403 if (!mlock_vma_pages_range(vma, addr, addr + len))
73404- mm->locked_vm += (len >> PAGE_SHIFT);
73405+ mm->locked_vm += charged;
73406 }
73407+ track_exec_limit(mm, addr, addr + len, flags);
73408 return addr;
73409 }
73410
5e856224 73411@@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
73412 * Walk the list again, actually closing and freeing it,
73413 * with preemption enabled, without holding any MM locks.
73414 */
73415- while (vma)
73416+ while (vma) {
73417+ vma->vm_mirror = NULL;
73418 vma = remove_vma(vma);
73419+ }
73420
73421 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
73422 }
5e856224 73423@@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73424 struct vm_area_struct * __vma, * prev;
73425 struct rb_node ** rb_link, * rb_parent;
73426
73427+#ifdef CONFIG_PAX_SEGMEXEC
73428+ struct vm_area_struct *vma_m = NULL;
73429+#endif
73430+
73431+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
73432+ return -EPERM;
73433+
73434 /*
73435 * The vm_pgoff of a purely anonymous vma should be irrelevant
73436 * until its first write fault, when page's anon_vma and index
5e856224 73437@@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73438 if ((vma->vm_flags & VM_ACCOUNT) &&
73439 security_vm_enough_memory_mm(mm, vma_pages(vma)))
73440 return -ENOMEM;
73441+
73442+#ifdef CONFIG_PAX_SEGMEXEC
73443+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73444+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73445+ if (!vma_m)
73446+ return -ENOMEM;
73447+ }
73448+#endif
73449+
73450 vma_link(mm, vma, prev, rb_link, rb_parent);
73451+
73452+#ifdef CONFIG_PAX_SEGMEXEC
73453+ if (vma_m)
df50ba0c 73454+ BUG_ON(pax_mirror_vma(vma_m, vma));
73455+#endif
73456+
73457 return 0;
73458 }
73459
5e856224 73460@@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
58c5fc13 73461 struct mempolicy *pol;
5e856224 73462 bool faulted_in_anon_vma = true;
73463
73464+ BUG_ON(vma->vm_mirror);
73465+
73466 /*
73467 * If anonymous vma has not yet been faulted, update new pgoff
73468 * to match new location, to increase its chance of merging.
5e856224 73469@@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
df50ba0c 73470 return NULL;
58c5fc13 73471 }
15a11c5b 73472
58c5fc13 73473+#ifdef CONFIG_PAX_SEGMEXEC
df50ba0c 73474+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73475+{
73476+ struct vm_area_struct *prev_m;
73477+ struct rb_node **rb_link_m, *rb_parent_m;
73478+ struct mempolicy *pol_m;
73479+
73480+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73481+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73482+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73483+ *vma_m = *vma;
df50ba0c
MT
73484+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73485+ if (anon_vma_clone(vma_m, vma))
73486+ return -ENOMEM;
58c5fc13
MT
73487+ pol_m = vma_policy(vma_m);
73488+ mpol_get(pol_m);
73489+ vma_set_policy(vma_m, pol_m);
73490+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73491+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73492+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73493+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73494+ if (vma_m->vm_file)
73495+ get_file(vma_m->vm_file);
73496+ if (vma_m->vm_ops && vma_m->vm_ops->open)
73497+ vma_m->vm_ops->open(vma_m);
73498+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73499+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73500+ vma_m->vm_mirror = vma;
73501+ vma->vm_mirror = vma_m;
df50ba0c 73502+ return 0;
73503+}
73504+#endif
15a11c5b 73505+
73506 /*
73507 * Return true if the calling process may expand its vm space by the passed
15a11c5b 73508 * number of pages
5e856224 73509@@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
58c5fc13 73510
df50ba0c 73511 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
5e856224
MT
73512
73513+#ifdef CONFIG_PAX_RANDMMAP
73514+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73515+ cur -= mm->brk_gap;
73516+#endif
73517+
58c5fc13
MT
73518+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73519 if (cur + npages > lim)
73520 return 0;
73521 return 1;
5e856224 73522@@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
58c5fc13
MT
73523 vma->vm_start = addr;
73524 vma->vm_end = addr + len;
73525
73526+#ifdef CONFIG_PAX_MPROTECT
73527+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 73528+#ifndef CONFIG_PAX_MPROTECT_COMPAT
57199397
MT
73529+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73530+ return -EPERM;
73531+ if (!(vm_flags & VM_EXEC))
73532+ vm_flags &= ~VM_MAYEXEC;
c52201e0
MT
73533+#else
73534+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73535+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73536+#endif
58c5fc13 73537+ else
57199397 73538+ vm_flags &= ~VM_MAYWRITE;
58c5fc13
MT
73539+ }
73540+#endif
73541+
73542 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73543 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73544
fe2de317 73545diff --git a/mm/mprotect.c b/mm/mprotect.c
5e856224 73546index f437d05..e3763f6 100644
fe2de317
MT
73547--- a/mm/mprotect.c
73548+++ b/mm/mprotect.c
df50ba0c 73549@@ -23,10 +23,16 @@
58c5fc13
MT
73550 #include <linux/mmu_notifier.h>
73551 #include <linux/migrate.h>
ae4e228f 73552 #include <linux/perf_event.h>
58c5fc13
MT
73553+
73554+#ifdef CONFIG_PAX_MPROTECT
73555+#include <linux/elf.h>
73556+#endif
73557+
73558 #include <asm/uaccess.h>
73559 #include <asm/pgtable.h>
73560 #include <asm/cacheflush.h>
73561 #include <asm/tlbflush.h>
73562+#include <asm/mmu_context.h>
73563
73564 #ifndef pgprot_modify
73565 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
fe2de317 73566@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
58c5fc13
MT
73567 flush_tlb_range(vma, start, end);
73568 }
73569
73570+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73571+/* called while holding the mmap semaphore for writing, except for stack expansion */
73572+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73573+{
73574+ unsigned long oldlimit, newlimit = 0UL;
73575+
ae4e228f 73576+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
73577+ return;
73578+
73579+ spin_lock(&mm->page_table_lock);
73580+ oldlimit = mm->context.user_cs_limit;
73581+ if ((prot & VM_EXEC) && oldlimit < end)
73582+ /* USER_CS limit moved up */
73583+ newlimit = end;
73584+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73585+ /* USER_CS limit moved down */
73586+ newlimit = start;
73587+
73588+ if (newlimit) {
73589+ mm->context.user_cs_limit = newlimit;
73590+
73591+#ifdef CONFIG_SMP
73592+ wmb();
73593+ cpus_clear(mm->context.cpu_user_cs_mask);
73594+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73595+#endif
73596+
73597+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73598+ }
73599+ spin_unlock(&mm->page_table_lock);
73600+ if (newlimit == end) {
73601+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
73602+
73603+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
73604+ if (is_vm_hugetlb_page(vma))
73605+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73606+ else
73607+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73608+ }
73609+}
73610+#endif
73611+
73612 int
73613 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73614 unsigned long start, unsigned long end, unsigned long newflags)
fe2de317 73615@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
58c5fc13
MT
73616 int error;
73617 int dirty_accountable = 0;
73618
73619+#ifdef CONFIG_PAX_SEGMEXEC
73620+ struct vm_area_struct *vma_m = NULL;
73621+ unsigned long start_m, end_m;
73622+
73623+ start_m = start + SEGMEXEC_TASK_SIZE;
73624+ end_m = end + SEGMEXEC_TASK_SIZE;
73625+#endif
73626+
73627 if (newflags == oldflags) {
73628 *pprev = vma;
73629 return 0;
57199397
MT
73630 }
73631
73632+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73633+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73634+
73635+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73636+ return -ENOMEM;
73637+
73638+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73639+ return -ENOMEM;
73640+ }
73641+
73642 /*
73643 * If we make a private mapping writable we increase our commit;
73644 * but (without finer accounting) cannot reduce our commit if we
fe2de317 73645@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
58c5fc13
MT
73646 }
73647 }
73648
73649+#ifdef CONFIG_PAX_SEGMEXEC
73650+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73651+ if (start != vma->vm_start) {
73652+ error = split_vma(mm, vma, start, 1);
73653+ if (error)
73654+ goto fail;
73655+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73656+ *pprev = (*pprev)->vm_next;
73657+ }
73658+
73659+ if (end != vma->vm_end) {
73660+ error = split_vma(mm, vma, end, 0);
73661+ if (error)
73662+ goto fail;
73663+ }
73664+
73665+ if (pax_find_mirror_vma(vma)) {
73666+ error = __do_munmap(mm, start_m, end_m - start_m);
73667+ if (error)
73668+ goto fail;
73669+ } else {
73670+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73671+ if (!vma_m) {
73672+ error = -ENOMEM;
73673+ goto fail;
73674+ }
73675+ vma->vm_flags = newflags;
df50ba0c
MT
73676+ error = pax_mirror_vma(vma_m, vma);
73677+ if (error) {
73678+ vma->vm_flags = oldflags;
73679+ goto fail;
73680+ }
58c5fc13
MT
73681+ }
73682+ }
73683+#endif
73684+
73685 /*
73686 * First try to merge with previous and/or next vma.
73687 */
16454cff 73688@@ -204,9 +306,21 @@ success:
df50ba0c 73689 * vm_flags and vm_page_prot are protected by the mmap_sem
58c5fc13
MT
73690 * held in write mode.
73691 */
df50ba0c
MT
73692+
73693+#ifdef CONFIG_PAX_SEGMEXEC
73694+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73695+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73696+#endif
73697+
58c5fc13
MT
73698 vma->vm_flags = newflags;
73699+
73700+#ifdef CONFIG_PAX_MPROTECT
ae4e228f
MT
73701+ if (mm->binfmt && mm->binfmt->handle_mprotect)
73702+ mm->binfmt->handle_mprotect(vma, newflags);
58c5fc13
MT
73703+#endif
73704+
73705 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73706- vm_get_page_prot(newflags));
73707+ vm_get_page_prot(vma->vm_flags));
73708
73709 if (vma_wants_writenotify(vma)) {
73710 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
fe2de317 73711@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
73712 end = start + len;
73713 if (end <= start)
73714 return -ENOMEM;
73715+
73716+#ifdef CONFIG_PAX_SEGMEXEC
73717+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73718+ if (end > SEGMEXEC_TASK_SIZE)
73719+ return -EINVAL;
73720+ } else
73721+#endif
73722+
73723+ if (end > TASK_SIZE)
73724+ return -EINVAL;
73725+
73726 if (!arch_validate_prot(prot))
73727 return -EINVAL;
73728
fe2de317 73729@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
73730 /*
73731 * Does the application expect PROT_READ to imply PROT_EXEC:
73732 */
73733- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73734+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73735 prot |= PROT_EXEC;
73736
73737 vm_flags = calc_vm_prot_bits(prot);
5e856224 73738@@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
73739 if (start > vma->vm_start)
73740 prev = vma;
73741
58c5fc13 73742+#ifdef CONFIG_PAX_MPROTECT
ae4e228f
MT
73743+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73744+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
58c5fc13
MT
73745+#endif
73746+
73747 for (nstart = start ; ; ) {
73748 unsigned long newflags;
73749
5e856224 73750@@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
6892158b
MT
73751
73752 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73753 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73754+ if (prot & (PROT_WRITE | PROT_EXEC))
73755+ gr_log_rwxmprotect(vma->vm_file);
73756+
73757+ error = -EACCES;
73758+ goto out;
73759+ }
73760+
73761+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73762 error = -EACCES;
73763 goto out;
73764 }
5e856224 73765@@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
bc901d79 73766 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
58c5fc13
MT
73767 if (error)
73768 goto out;
58c5fc13
MT
73769+
73770+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
73771+
73772 nstart = tmp;
73773
73774 if (nstart < prev->vm_end)
fe2de317 73775diff --git a/mm/mremap.c b/mm/mremap.c
5e856224 73776index 87bb839..c3bfadb 100644
fe2de317
MT
73777--- a/mm/mremap.c
73778+++ b/mm/mremap.c
4c928ab7 73779@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
58c5fc13 73780 continue;
4c928ab7 73781 pte = ptep_get_and_clear(mm, old_addr, old_pte);
58c5fc13
MT
73782 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
73783+
73784+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 73785+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
58c5fc13
MT
73786+ pte = pte_exprotect(pte);
73787+#endif
73788+
73789 set_pte_at(mm, new_addr, new_pte, pte);
73790 }
73791
5e856224 73792@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
ae4e228f
MT
73793 if (is_vm_hugetlb_page(vma))
73794 goto Einval;
73795
73796+#ifdef CONFIG_PAX_SEGMEXEC
73797+ if (pax_find_mirror_vma(vma))
73798+ goto Einval;
73799+#endif
73800+
73801 /* We can't remap across vm area boundaries */
73802 if (old_len > vma->vm_end - addr)
73803 goto Efault;
5e856224 73804@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
ae4e228f
MT
73805 unsigned long ret = -EINVAL;
73806 unsigned long charged = 0;
73807 unsigned long map_flags;
73808+ unsigned long pax_task_size = TASK_SIZE;
73809
73810 if (new_addr & ~PAGE_MASK)
73811 goto out;
73812
73813- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
73814+#ifdef CONFIG_PAX_SEGMEXEC
73815+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73816+ pax_task_size = SEGMEXEC_TASK_SIZE;
73817+#endif
73818+
6892158b
MT
73819+ pax_task_size -= PAGE_SIZE;
73820+
ae4e228f
MT
73821+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
73822 goto out;
73823
73824 /* Check if the location we're moving into overlaps the
73825 * old location at all, and fail if it does.
73826 */
73827- if ((new_addr <= addr) && (new_addr+new_len) > addr)
73828- goto out;
73829-
73830- if ((addr <= new_addr) && (addr+old_len) > new_addr)
73831+ if (addr + old_len > new_addr && new_addr + new_len > addr)
73832 goto out;
73833
73834 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
5e856224 73835@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
73836 struct vm_area_struct *vma;
73837 unsigned long ret = -EINVAL;
73838 unsigned long charged = 0;
73839+ unsigned long pax_task_size = TASK_SIZE;
73840
73841 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
73842 goto out;
5e856224 73843@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
73844 if (!new_len)
73845 goto out;
73846
73847+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 73848+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
58c5fc13
MT
73849+ pax_task_size = SEGMEXEC_TASK_SIZE;
73850+#endif
73851+
6892158b
MT
73852+ pax_task_size -= PAGE_SIZE;
73853+
58c5fc13
MT
73854+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
73855+ old_len > pax_task_size || addr > pax_task_size-old_len)
73856+ goto out;
73857+
58c5fc13 73858 if (flags & MREMAP_FIXED) {
ae4e228f
MT
73859 if (flags & MREMAP_MAYMOVE)
73860 ret = mremap_to(addr, old_len, new_addr, new_len);
5e856224 73861@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
73862 addr + new_len);
73863 }
73864 ret = addr;
73865+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73866 goto out;
73867 }
73868 }
5e856224 73869@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
ae4e228f
MT
73870 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73871 if (ret)
73872 goto out;
73873+
58c5fc13
MT
73874+ map_flags = vma->vm_flags;
73875 ret = move_vma(vma, addr, old_len, new_len, new_addr);
73876+ if (!(ret & ~PAGE_MASK)) {
73877+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
73878+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
73879+ }
73880 }
73881 out:
73882 if (ret & ~PAGE_MASK)
fe2de317 73883diff --git a/mm/nommu.c b/mm/nommu.c
4c928ab7 73884index f59e170..34e2a2b 100644
fe2de317
MT
73885--- a/mm/nommu.c
73886+++ b/mm/nommu.c
73887@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
57199397
MT
73888 int sysctl_overcommit_ratio = 50; /* default is 50% */
73889 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73890 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73891-int heap_stack_gap = 0;
73892
73893 atomic_long_t mmap_pages_allocated;
73894
4c928ab7 73895@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
58c5fc13
MT
73896 EXPORT_SYMBOL(find_vma);
73897
73898 /*
73899- * find a VMA
73900- * - we don't extend stack VMAs under NOMMU conditions
73901- */
73902-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73903-{
73904- return find_vma(mm, addr);
73905-}
73906-
73907-/*
73908 * expand a stack to a given address
73909 * - not supported under NOMMU conditions
73910 */
4c928ab7 73911@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
df50ba0c
MT
73912
73913 /* most fields are the same, copy all, and then fixup */
73914 *new = *vma;
73915+ INIT_LIST_HEAD(&new->anon_vma_chain);
73916 *region = *vma->vm_region;
73917 new->vm_region = region;
73918
fe2de317 73919diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5e856224 73920index a13ded1..b949d15 100644
fe2de317
MT
73921--- a/mm/page_alloc.c
73922+++ b/mm/page_alloc.c
5e856224 73923@@ -335,7 +335,7 @@ out:
15a11c5b
MT
73924 * This usage means that zero-order pages may not be compound.
73925 */
73926
73927-static void free_compound_page(struct page *page)
73928+void free_compound_page(struct page *page)
73929 {
73930 __free_pages_ok(page, compound_order(page));
73931 }
5e856224 73932@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
57199397 73933 int i;
58c5fc13 73934 int bad = 0;
58c5fc13
MT
73935
73936+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73937+ unsigned long index = 1UL << order;
73938+#endif
73939+
5e856224 73940 trace_mm_page_free(page, order);
58c5fc13
MT
73941 kmemcheck_free_shadow(page, order);
73942
5e856224 73943@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
58c5fc13
MT
73944 debug_check_no_obj_freed(page_address(page),
73945 PAGE_SIZE << order);
73946 }
73947+
73948+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73949+ for (; index; --index)
73950+ sanitize_highpage(page + index - 1);
73951+#endif
73952+
73953 arch_free_page(page, order);
73954 kernel_map_pages(page, 1 << order, 0);
73955
5e856224 73956@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
58c5fc13
MT
73957 arch_alloc_page(page, order);
73958 kernel_map_pages(page, 1 << order, 1);
73959
73960+#ifndef CONFIG_PAX_MEMORY_SANITIZE
73961 if (gfp_flags & __GFP_ZERO)
73962 prep_zero_page(page, order, gfp_flags);
73963+#endif
73964
73965 if (order && (gfp_flags & __GFP_COMP))
73966 prep_compound_page(page, order);
5e856224 73967@@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
6e9df6a3
MT
73968 unsigned long pfn;
73969
73970 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73971+#ifdef CONFIG_X86_32
73972+	/* boot failures in VMware 8 on 32-bit vanilla kernels since
73973+	   this change */
73974+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73975+#else
73976 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73977+#endif
73978 return 1;
73979 }
73980 return 0;
fe2de317 73981diff --git a/mm/percpu.c b/mm/percpu.c
5e856224 73982index f47af91..7eeef99 100644
fe2de317
MT
73983--- a/mm/percpu.c
73984+++ b/mm/percpu.c
5e856224 73985@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
4c928ab7 73986 static unsigned int pcpu_high_unit_cpu __read_mostly;
58c5fc13
MT
73987
73988 /* the address of the first chunk which starts with the kernel static area */
73989-void *pcpu_base_addr __read_mostly;
73990+void *pcpu_base_addr __read_only;
73991 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73992
ae4e228f 73993 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
4c928ab7 73994diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
5e856224 73995index c20ff48..137702a 100644
4c928ab7
MT
73996--- a/mm/process_vm_access.c
73997+++ b/mm/process_vm_access.c
73998@@ -13,6 +13,7 @@
73999 #include <linux/uio.h>
74000 #include <linux/sched.h>
74001 #include <linux/highmem.h>
74002+#include <linux/security.h>
74003 #include <linux/ptrace.h>
74004 #include <linux/slab.h>
74005 #include <linux/syscalls.h>
74006@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74007 size_t iov_l_curr_offset = 0;
74008 ssize_t iov_len;
74009
74010+ return -ENOSYS; // PaX: until properly audited
74011+
74012 /*
74013 * Work out how many pages of struct pages we're going to need
74014 * when eventually calling get_user_pages
74015 */
74016 for (i = 0; i < riovcnt; i++) {
74017 iov_len = rvec[i].iov_len;
74018- if (iov_len > 0) {
74019- nr_pages_iov = ((unsigned long)rvec[i].iov_base
74020- + iov_len)
74021- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
74022- / PAGE_SIZE + 1;
74023- nr_pages = max(nr_pages, nr_pages_iov);
74024- }
74025+ if (iov_len <= 0)
74026+ continue;
74027+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
74028+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
74029+ nr_pages = max(nr_pages, nr_pages_iov);
74030 }
74031
74032 if (nr_pages == 0)
5e856224 74033@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
4c928ab7
MT
74034 goto free_proc_pages;
74035 }
74036
4c928ab7 74037+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
5e856224
MT
74038+ rc = -EPERM;
74039+ goto put_task_struct;
74040+ }
74041+
74042 mm = mm_access(task, PTRACE_MODE_ATTACH);
74043 if (!mm || IS_ERR(mm)) {
74044 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
fe2de317 74045diff --git a/mm/rmap.c b/mm/rmap.c
5e856224 74046index c8454e0..b04f3a2 100644
fe2de317
MT
74047--- a/mm/rmap.c
74048+++ b/mm/rmap.c
74049@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
74050 struct anon_vma *anon_vma = vma->anon_vma;
74051 struct anon_vma_chain *avc;
74052
74053+#ifdef CONFIG_PAX_SEGMEXEC
74054+ struct anon_vma_chain *avc_m = NULL;
74055+#endif
74056+
74057 might_sleep();
74058 if (unlikely(!anon_vma)) {
58c5fc13 74059 struct mm_struct *mm = vma->vm_mm;
fe2de317 74060@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
74061 if (!avc)
74062 goto out_enomem;
74063
74064+#ifdef CONFIG_PAX_SEGMEXEC
15a11c5b 74065+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
df50ba0c
MT
74066+ if (!avc_m)
74067+ goto out_enomem_free_avc;
74068+#endif
58c5fc13
MT
74069+
74070 anon_vma = find_mergeable_anon_vma(vma);
74071 allocated = NULL;
74072 if (!anon_vma) {
fe2de317 74073@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
57199397
MT
74074 /* page_table_lock to protect against threads */
74075 spin_lock(&mm->page_table_lock);
74076 if (likely(!vma->anon_vma)) {
58c5fc13
MT
74077+
74078+#ifdef CONFIG_PAX_SEGMEXEC
57199397
MT
74079+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74080+
58c5fc13 74081+ if (vma_m) {
df50ba0c 74082+ BUG_ON(vma_m->anon_vma);
58c5fc13 74083+ vma_m->anon_vma = anon_vma;
df50ba0c
MT
74084+ avc_m->anon_vma = anon_vma;
74085+ avc_m->vma = vma;
74086+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
74087+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
74088+ avc_m = NULL;
58c5fc13
MT
74089+ }
74090+#endif
74091+
57199397
MT
74092 vma->anon_vma = anon_vma;
74093 avc->anon_vma = anon_vma;
74094 avc->vma = vma;
fe2de317 74095@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
74096
74097 if (unlikely(allocated))
66a7e928 74098 put_anon_vma(allocated);
df50ba0c
MT
74099+
74100+#ifdef CONFIG_PAX_SEGMEXEC
74101+ if (unlikely(avc_m))
74102+ anon_vma_chain_free(avc_m);
74103+#endif
74104+
74105 if (unlikely(avc))
74106 anon_vma_chain_free(avc);
74107 }
74108 return 0;
74109
74110 out_enomem_free_avc:
74111+
74112+#ifdef CONFIG_PAX_SEGMEXEC
74113+ if (avc_m)
74114+ anon_vma_chain_free(avc_m);
74115+#endif
74116+
74117 anon_vma_chain_free(avc);
74118 out_enomem:
74119 return -ENOMEM;
fe2de317 74120@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
57199397
MT
74121 * Attach the anon_vmas from src to dst.
74122 * Returns 0 on success, -ENOMEM on failure.
74123 */
74124-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
74125+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
74126 {
74127 struct anon_vma_chain *avc, *pavc;
15a11c5b 74128 struct anon_vma *root = NULL;
5e856224 74129@@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
57199397
MT
74130 * the corresponding VMA in the parent process is attached to.
74131 * Returns 0 on success, non-zero on failure.
74132 */
74133-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
74134+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
74135 {
74136 struct anon_vma_chain *avc;
74137 struct anon_vma *anon_vma;
fe2de317 74138diff --git a/mm/shmem.c b/mm/shmem.c
5e856224 74139index 269d049..a9d2b50 100644
fe2de317
MT
74140--- a/mm/shmem.c
74141+++ b/mm/shmem.c
6892158b 74142@@ -31,7 +31,7 @@
4c928ab7 74143 #include <linux/export.h>
58c5fc13 74144 #include <linux/swap.h>
58c5fc13
MT
74145
74146-static struct vfsmount *shm_mnt;
74147+struct vfsmount *shm_mnt;
74148
74149 #ifdef CONFIG_SHMEM
74150 /*
6e9df6a3
MT
74151@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
74152 #define BOGO_DIRENT_SIZE 20
74153
74154 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
74155-#define SHORT_SYMLINK_LEN 128
74156+#define SHORT_SYMLINK_LEN 64
74157
74158 struct shmem_xattr {
74159 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
4c928ab7 74160@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
66a7e928
MT
74161 int err = -ENOMEM;
74162
74163 /* Round up to L1_CACHE_BYTES to resist false sharing */
74164- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74165- L1_CACHE_BYTES), GFP_KERNEL);
74166+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74167 if (!sbinfo)
74168 return -ENOMEM;
74169
fe2de317 74170diff --git a/mm/slab.c b/mm/slab.c
5e856224 74171index f0bd785..348b96a 100644
fe2de317
MT
74172--- a/mm/slab.c
74173+++ b/mm/slab.c
5e856224 74174@@ -153,7 +153,7 @@
71d190be
MT
74175
74176 /* Legal flag mask for kmem_cache_create(). */
74177 #if DEBUG
74178-# define CREATE_MASK (SLAB_RED_ZONE | \
74179+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74180 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74181 SLAB_CACHE_DMA | \
74182 SLAB_STORE_USER | \
5e856224 74183@@ -161,7 +161,7 @@
71d190be
MT
74184 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74185 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74186 #else
74187-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74188+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74189 SLAB_CACHE_DMA | \
74190 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74191 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
5e856224 74192@@ -290,7 +290,7 @@ struct kmem_list3 {
58c5fc13
MT
74193 * Need this for bootstrapping a per node allocator.
74194 */
74195 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
16454cff
MT
74196-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74197+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
58c5fc13
MT
74198 #define CACHE_CACHE 0
74199 #define SIZE_AC MAX_NUMNODES
74200 #define SIZE_L3 (2 * MAX_NUMNODES)
5e856224 74201@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
8308f9c9
MT
74202 if ((x)->max_freeable < i) \
74203 (x)->max_freeable = i; \
74204 } while (0)
74205-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74206-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74207-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74208-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74209+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74210+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74211+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74212+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74213 #else
74214 #define STATS_INC_ACTIVE(x) do { } while (0)
74215 #define STATS_DEC_ACTIVE(x) do { } while (0)
5e856224 74216@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
58c5fc13
MT
74217 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74218 */
74219 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74220- const struct slab *slab, void *obj)
74221+ const struct slab *slab, const void *obj)
74222 {
74223 u32 offset = (obj - slab->s_mem);
74224 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
5e856224 74225@@ -568,7 +568,7 @@ struct cache_names {
58c5fc13
MT
74226 static struct cache_names __initdata cache_names[] = {
74227 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
74228 #include <linux/kmalloc_sizes.h>
74229- {NULL,}
71d190be 74230+ {NULL}
58c5fc13
MT
74231 #undef CACHE
74232 };
74233
5e856224 74234@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
71d190be
MT
74235 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74236 sizes[INDEX_AC].cs_size,
74237 ARCH_KMALLOC_MINALIGN,
74238- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74239+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74240 NULL);
74241
74242 if (INDEX_AC != INDEX_L3) {
5e856224 74243@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
71d190be
MT
74244 kmem_cache_create(names[INDEX_L3].name,
74245 sizes[INDEX_L3].cs_size,
74246 ARCH_KMALLOC_MINALIGN,
74247- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74248+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74249 NULL);
74250 }
74251
5e856224 74252@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
71d190be
MT
74253 sizes->cs_cachep = kmem_cache_create(names->name,
74254 sizes->cs_size,
74255 ARCH_KMALLOC_MINALIGN,
74256- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74257+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74258 NULL);
74259 }
74260 #ifdef CONFIG_ZONE_DMA
5e856224 74261@@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
8308f9c9
MT
74262 }
74263 /* cpu stats */
74264 {
74265- unsigned long allochit = atomic_read(&cachep->allochit);
74266- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74267- unsigned long freehit = atomic_read(&cachep->freehit);
74268- unsigned long freemiss = atomic_read(&cachep->freemiss);
74269+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74270+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74271+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74272+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74273
74274 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74275 allochit, allocmiss, freehit, freemiss);
5e856224 74276@@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
58c5fc13 74277 {
4c928ab7 74278 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
df50ba0c
MT
74279 #ifdef CONFIG_DEBUG_SLAB_LEAK
74280- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4c928ab7 74281+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
df50ba0c
MT
74282 #endif
74283 return 0;
74284 }
58c5fc13
MT
74285 module_init(slab_proc_init);
74286 #endif
74287
74288+void check_object_size(const void *ptr, unsigned long n, bool to)
74289+{
74290+
74291+#ifdef CONFIG_PAX_USERCOPY
58c5fc13 74292+ struct page *page;
71d190be
MT
74293+ struct kmem_cache *cachep = NULL;
74294+ struct slab *slabp;
58c5fc13
MT
74295+ unsigned int objnr;
74296+ unsigned long offset;
6e9df6a3 74297+ const char *type;
58c5fc13
MT
74298+
74299+ if (!n)
74300+ return;
74301+
6e9df6a3 74302+ type = "<null>";
58c5fc13
MT
74303+ if (ZERO_OR_NULL_PTR(ptr))
74304+ goto report;
74305+
74306+ if (!virt_addr_valid(ptr))
74307+ return;
74308+
74309+ page = virt_to_head_page(ptr);
74310+
6e9df6a3 74311+ type = "<process stack>";
ae4e228f
MT
74312+ if (!PageSlab(page)) {
74313+ if (object_is_on_stack(ptr, n) == -1)
74314+ goto report;
58c5fc13 74315+ return;
ae4e228f 74316+ }
58c5fc13
MT
74317+
74318+ cachep = page_get_cache(page);
6e9df6a3 74319+ type = cachep->name;
71d190be
MT
74320+ if (!(cachep->flags & SLAB_USERCOPY))
74321+ goto report;
74322+
58c5fc13
MT
74323+ slabp = page_get_slab(page);
74324+ objnr = obj_to_index(cachep, slabp, ptr);
74325+ BUG_ON(objnr >= cachep->num);
74326+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74327+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74328+ return;
74329+
74330+report:
6e9df6a3 74331+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
74332+#endif
74333+
74334+}
74335+EXPORT_SYMBOL(check_object_size);
74336+
74337 /**
74338 * ksize - get the actual amount of memory allocated for a given object
74339 * @objp: Pointer to the object
fe2de317 74340diff --git a/mm/slob.c b/mm/slob.c
4c928ab7 74341index 8105be4..e045f96 100644
fe2de317
MT
74342--- a/mm/slob.c
74343+++ b/mm/slob.c
58c5fc13
MT
74344@@ -29,7 +29,7 @@
74345 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74346 * alloc_pages() directly, allocating compound pages so the page order
74347 * does not have to be separately tracked, and also stores the exact
74348- * allocation size in page->private so that it can be used to accurately
74349+ * allocation size in slob_page->size so that it can be used to accurately
74350 * provide ksize(). These objects are detected in kfree() because slob_page()
74351 * is false for them.
74352 *
74353@@ -58,6 +58,7 @@
74354 */
74355
74356 #include <linux/kernel.h>
74357+#include <linux/sched.h>
74358 #include <linux/slab.h>
74359 #include <linux/mm.h>
74360 #include <linux/swap.h> /* struct reclaim_state */
6892158b 74361@@ -102,7 +103,8 @@ struct slob_page {
58c5fc13
MT
74362 unsigned long flags; /* mandatory */
74363 atomic_t _count; /* mandatory */
74364 slobidx_t units; /* free units left in page */
74365- unsigned long pad[2];
74366+ unsigned long pad[1];
74367+ unsigned long size; /* size when >=PAGE_SIZE */
74368 slob_t *free; /* first free slob_t in page */
74369 struct list_head list; /* linked list of free pages */
74370 };
6892158b 74371@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
58c5fc13
MT
74372 */
74373 static inline int is_slob_page(struct slob_page *sp)
74374 {
74375- return PageSlab((struct page *)sp);
74376+ return PageSlab((struct page *)sp) && !sp->size;
74377 }
74378
74379 static inline void set_slob_page(struct slob_page *sp)
fe2de317 74380@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
58c5fc13
MT
74381
74382 static inline struct slob_page *slob_page(const void *addr)
74383 {
74384- return (struct slob_page *)virt_to_page(addr);
74385+ return (struct slob_page *)virt_to_head_page(addr);
74386 }
74387
74388 /*
fe2de317 74389@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
58c5fc13
MT
74390 /*
74391 * Return the size of a slob block.
74392 */
74393-static slobidx_t slob_units(slob_t *s)
74394+static slobidx_t slob_units(const slob_t *s)
74395 {
74396 if (s->units > 0)
74397 return s->units;
6892158b 74398@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
58c5fc13
MT
74399 /*
74400 * Return the next free slob block pointer after this one.
74401 */
74402-static slob_t *slob_next(slob_t *s)
74403+static slob_t *slob_next(const slob_t *s)
74404 {
74405 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74406 slobidx_t next;
6892158b 74407@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
58c5fc13
MT
74408 /*
74409 * Returns true if s is the last free block in its page.
74410 */
74411-static int slob_last(slob_t *s)
74412+static int slob_last(const slob_t *s)
74413 {
74414 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74415 }
fe2de317 74416@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
58c5fc13
MT
74417 if (!page)
74418 return NULL;
74419
74420+ set_slob_page(page);
74421 return page_address(page);
74422 }
74423
fe2de317 74424@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
58c5fc13
MT
74425 if (!b)
74426 return NULL;
74427 sp = slob_page(b);
74428- set_slob_page(sp);
74429
74430 spin_lock_irqsave(&slob_lock, flags);
74431 sp->units = SLOB_UNITS(PAGE_SIZE);
74432 sp->free = b;
74433+ sp->size = 0;
74434 INIT_LIST_HEAD(&sp->list);
74435 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74436 set_slob_page_free(sp, slob_list);
6892158b 74437@@ -476,10 +479,9 @@ out:
57199397
MT
74438 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74439 */
58c5fc13
MT
74440
74441-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74442+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74443 {
74444- unsigned int *m;
74445- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74446+ slob_t *m;
74447 void *ret;
74448
6e9df6a3 74449 gfp &= gfp_allowed_mask;
fe2de317 74450@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
58c5fc13
MT
74451
74452 if (!m)
74453 return NULL;
74454- *m = size;
74455+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74456+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74457+ m[0].units = size;
74458+ m[1].units = align;
74459 ret = (void *)m + align;
74460
74461 trace_kmalloc_node(_RET_IP_, ret,
fe2de317 74462@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
bc901d79
MT
74463 gfp |= __GFP_COMP;
74464 ret = slob_new_pages(gfp, order, node);
58c5fc13
MT
74465 if (ret) {
74466- struct page *page;
74467- page = virt_to_page(ret);
74468- page->private = size;
74469+ struct slob_page *sp;
74470+ sp = slob_page(ret);
74471+ sp->size = size;
74472 }
74473
74474 trace_kmalloc_node(_RET_IP_, ret,
15a11c5b
MT
74475 size, PAGE_SIZE << order, gfp, node);
74476 }
74477
74478- kmemleak_alloc(ret, size, 1, gfp);
74479+ return ret;
74480+}
58c5fc13
MT
74481+
74482+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74483+{
74484+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
15a11c5b 74485+ void *ret = __kmalloc_node_align(size, gfp, node, align);
58c5fc13 74486+
15a11c5b
MT
74487+ if (!ZERO_OR_NULL_PTR(ret))
74488+ kmemleak_alloc(ret, size, 1, gfp);
74489 return ret;
74490 }
58c5fc13 74491 EXPORT_SYMBOL(__kmalloc_node);
6e9df6a3 74492@@ -533,13 +547,92 @@ void kfree(const void *block)
58c5fc13
MT
74493 sp = slob_page(block);
74494 if (is_slob_page(sp)) {
74495 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74496- unsigned int *m = (unsigned int *)(block - align);
74497- slob_free(m, *m + align);
74498- } else
74499+ slob_t *m = (slob_t *)(block - align);
74500+ slob_free(m, m[0].units + align);
74501+ } else {
74502+ clear_slob_page(sp);
74503+ free_slob_page(sp);
74504+ sp->size = 0;
74505 put_page(&sp->page);
74506+ }
74507 }
74508 EXPORT_SYMBOL(kfree);
74509
74510+void check_object_size(const void *ptr, unsigned long n, bool to)
74511+{
74512+
74513+#ifdef CONFIG_PAX_USERCOPY
74514+ struct slob_page *sp;
74515+ const slob_t *free;
74516+ const void *base;
15a11c5b 74517+ unsigned long flags;
6e9df6a3 74518+ const char *type;
58c5fc13
MT
74519+
74520+ if (!n)
74521+ return;
74522+
6e9df6a3 74523+ type = "<null>";
58c5fc13
MT
74524+ if (ZERO_OR_NULL_PTR(ptr))
74525+ goto report;
74526+
74527+ if (!virt_addr_valid(ptr))
74528+ return;
74529+
6e9df6a3 74530+ type = "<process stack>";
58c5fc13 74531+ sp = slob_page(ptr);
4c928ab7 74532+ if (!PageSlab((struct page *)sp)) {
ae4e228f
MT
74533+ if (object_is_on_stack(ptr, n) == -1)
74534+ goto report;
58c5fc13 74535+ return;
ae4e228f 74536+ }
58c5fc13 74537+
6e9df6a3 74538+ type = "<slob>";
58c5fc13
MT
74539+ if (sp->size) {
74540+ base = page_address(&sp->page);
74541+ if (base <= ptr && n <= sp->size - (ptr - base))
74542+ return;
74543+ goto report;
74544+ }
74545+
74546+ /* some tricky double walking to find the chunk */
15a11c5b 74547+ spin_lock_irqsave(&slob_lock, flags);
58c5fc13
MT
74548+ base = (void *)((unsigned long)ptr & PAGE_MASK);
74549+ free = sp->free;
74550+
74551+ while (!slob_last(free) && (void *)free <= ptr) {
74552+ base = free + slob_units(free);
74553+ free = slob_next(free);
74554+ }
74555+
74556+ while (base < (void *)free) {
74557+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74558+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
74559+ int offset;
74560+
74561+ if (ptr < base + align)
15a11c5b 74562+ break;
58c5fc13
MT
74563+
74564+ offset = ptr - base - align;
15a11c5b
MT
74565+ if (offset >= m) {
74566+ base += size;
74567+ continue;
58c5fc13 74568+ }
15a11c5b
MT
74569+
74570+ if (n > m - offset)
74571+ break;
74572+
74573+ spin_unlock_irqrestore(&slob_lock, flags);
74574+ return;
58c5fc13
MT
74575+ }
74576+
15a11c5b 74577+ spin_unlock_irqrestore(&slob_lock, flags);
58c5fc13 74578+report:
6e9df6a3 74579+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
74580+#endif
74581+
74582+}
74583+EXPORT_SYMBOL(check_object_size);
74584+
74585 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74586 size_t ksize(const void *block)
74587 {
6e9df6a3 74588@@ -552,10 +645,10 @@ size_t ksize(const void *block)
58c5fc13
MT
74589 sp = slob_page(block);
74590 if (is_slob_page(sp)) {
74591 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74592- unsigned int *m = (unsigned int *)(block - align);
74593- return SLOB_UNITS(*m) * SLOB_UNIT;
74594+ slob_t *m = (slob_t *)(block - align);
74595+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74596 } else
74597- return sp->page.private;
74598+ return sp->size;
74599 }
74600 EXPORT_SYMBOL(ksize);
74601
fe2de317 74602@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
15a11c5b
MT
74603 {
74604 struct kmem_cache *c;
74605
74606+#ifdef CONFIG_PAX_USERCOPY
74607+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
74608+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74609+#else
74610 c = slob_alloc(sizeof(struct kmem_cache),
74611 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74612+#endif
74613
74614 if (c) {
74615 c->name = name;
fe2de317 74616@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
6e9df6a3
MT
74617
74618 lockdep_trace_alloc(flags);
58c5fc13
MT
74619
74620+#ifdef CONFIG_PAX_USERCOPY
74621+ b = __kmalloc_node_align(c->size, flags, node, c->align);
74622+#else
74623 if (c->size < PAGE_SIZE) {
74624 b = slob_alloc(c->size, flags, c->align, node);
74625 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74626 SLOB_UNITS(c->size) * SLOB_UNIT,
74627 flags, node);
74628 } else {
74629+ struct slob_page *sp;
74630+
74631 b = slob_new_pages(flags, get_order(c->size), node);
74632+ sp = slob_page(b);
74633+ sp->size = c->size;
74634 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74635 PAGE_SIZE << get_order(c->size),
74636 flags, node);
74637 }
74638+#endif
74639
74640 if (c->ctor)
74641 c->ctor(b);
6e9df6a3 74642@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
58c5fc13
MT
74643
74644 static void __kmem_cache_free(void *b, int size)
74645 {
74646- if (size < PAGE_SIZE)
74647+ struct slob_page *sp = slob_page(b);
74648+
74649+ if (is_slob_page(sp))
74650 slob_free(b, size);
74651- else
74652+ else {
74653+ clear_slob_page(sp);
74654+ free_slob_page(sp);
74655+ sp->size = 0;
74656 slob_free_pages(b, get_order(size));
74657+ }
74658 }
74659
74660 static void kmem_rcu_free(struct rcu_head *head)
fe2de317 74661@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
58c5fc13
MT
74662
74663 void kmem_cache_free(struct kmem_cache *c, void *b)
74664 {
74665+ int size = c->size;
74666+
74667+#ifdef CONFIG_PAX_USERCOPY
74668+ if (size + c->align < PAGE_SIZE) {
74669+ size += c->align;
74670+ b -= c->align;
74671+ }
74672+#endif
74673+
74674 kmemleak_free_recursive(b, c->flags);
74675 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74676 struct slob_rcu *slob_rcu;
74677- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
58c5fc13 74678- slob_rcu->size = c->size;
6892158b 74679+ slob_rcu = b + (size - sizeof(struct slob_rcu));
58c5fc13
MT
74680+ slob_rcu->size = size;
74681 call_rcu(&slob_rcu->head, kmem_rcu_free);
74682 } else {
74683- __kmem_cache_free(b, c->size);
74684+ __kmem_cache_free(b, size);
74685 }
74686
15a11c5b
MT
74687+#ifdef CONFIG_PAX_USERCOPY
74688+ trace_kfree(_RET_IP_, b);
74689+#else
58c5fc13 74690 trace_kmem_cache_free(_RET_IP_, b);
15a11c5b
MT
74691+#endif
74692+
74693 }
74694 EXPORT_SYMBOL(kmem_cache_free);
74695
fe2de317 74696diff --git a/mm/slub.c b/mm/slub.c
5e856224 74697index 0342a5d..8180ae9 100644
fe2de317
MT
74698--- a/mm/slub.c
74699+++ b/mm/slub.c
6e9df6a3 74700@@ -208,7 +208,7 @@ struct track {
15a11c5b
MT
74701
74702 enum track_item { TRACK_ALLOC, TRACK_FREE };
74703
74704-#ifdef CONFIG_SYSFS
74705+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74706 static int sysfs_slab_add(struct kmem_cache *);
74707 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74708 static void sysfs_slab_remove(struct kmem_cache *);
5e856224 74709@@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
bc901d79
MT
74710 if (!t->addr)
74711 return;
74712
74713- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74714+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74715 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
6e9df6a3
MT
74716 #ifdef CONFIG_STACKTRACE
74717 {
5e856224 74718@@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
ae4e228f
MT
74719
74720 page = virt_to_head_page(x);
74721
74722+ BUG_ON(!PageSlab(page));
74723+
74724 slab_free(s, page, x, _RET_IP_);
74725
74726 trace_kmem_cache_free(_RET_IP_, x);
5e856224 74727@@ -2604,7 +2606,7 @@ static int slub_min_objects;
58c5fc13
MT
74728 * Merge control. If this is set then no merging of slab caches will occur.
74729 * (Could be removed. This was introduced to pacify the merge skeptics.)
74730 */
74731-static int slub_nomerge;
74732+static int slub_nomerge = 1;
74733
74734 /*
74735 * Calculate the order of allocation given an slab object size.
5e856224 74736@@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
4c928ab7
MT
74737 else
74738 s->cpu_partial = 30;
74739
58c5fc13
MT
74740- s->refcount = 1;
74741+ atomic_set(&s->refcount, 1);
74742 #ifdef CONFIG_NUMA
74743 s->remote_node_defrag_ratio = 1000;
74744 #endif
5e856224 74745@@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
58c5fc13
MT
74746 void kmem_cache_destroy(struct kmem_cache *s)
74747 {
74748 down_write(&slub_lock);
74749- s->refcount--;
74750- if (!s->refcount) {
74751+ if (atomic_dec_and_test(&s->refcount)) {
74752 list_del(&s->list);
4c928ab7 74753 up_write(&slub_lock);
58c5fc13 74754 if (kmem_cache_close(s)) {
5e856224 74755@@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
58c5fc13
MT
74756 EXPORT_SYMBOL(__kmalloc_node);
74757 #endif
74758
74759+void check_object_size(const void *ptr, unsigned long n, bool to)
74760+{
74761+
74762+#ifdef CONFIG_PAX_USERCOPY
74763+ struct page *page;
71d190be 74764+ struct kmem_cache *s = NULL;
58c5fc13 74765+ unsigned long offset;
6e9df6a3 74766+ const char *type;
58c5fc13
MT
74767+
74768+ if (!n)
74769+ return;
74770+
6e9df6a3 74771+ type = "<null>";
58c5fc13
MT
74772+ if (ZERO_OR_NULL_PTR(ptr))
74773+ goto report;
74774+
74775+ if (!virt_addr_valid(ptr))
74776+ return;
74777+
16454cff 74778+ page = virt_to_head_page(ptr);
58c5fc13 74779+
6e9df6a3 74780+ type = "<process stack>";
16454cff 74781+ if (!PageSlab(page)) {
ae4e228f
MT
74782+ if (object_is_on_stack(ptr, n) == -1)
74783+ goto report;
58c5fc13 74784+ return;
ae4e228f 74785+ }
58c5fc13
MT
74786+
74787+ s = page->slab;
6e9df6a3 74788+ type = s->name;
71d190be
MT
74789+ if (!(s->flags & SLAB_USERCOPY))
74790+ goto report;
74791+
58c5fc13
MT
74792+ offset = (ptr - page_address(page)) % s->size;
74793+ if (offset <= s->objsize && n <= s->objsize - offset)
74794+ return;
74795+
74796+report:
6e9df6a3 74797+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
74798+#endif
74799+
74800+}
74801+EXPORT_SYMBOL(check_object_size);
74802+
74803 size_t ksize(const void *object)
74804 {
74805 struct page *page;
5e856224 74806@@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
bc901d79
MT
74807 int node;
74808
74809 list_add(&s->list, &slab_caches);
74810- s->refcount = -1;
74811+ atomic_set(&s->refcount, -1);
74812
74813 for_each_node_state(node, N_NORMAL_MEMORY) {
74814 struct kmem_cache_node *n = get_node(s, node);
5e856224 74815@@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
71d190be
MT
74816
74817 /* Caches that are not of the two-to-the-power-of size */
74818 if (KMALLOC_MIN_SIZE <= 32) {
74819- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
74820+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
74821 caches++;
74822 }
74823
74824 if (KMALLOC_MIN_SIZE <= 64) {
74825- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
74826+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74827 caches++;
74828 }
74829
74830 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74831- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74832+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74833 caches++;
74834 }
74835
5e856224 74836@@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
58c5fc13
MT
74837 /*
74838 * We may have set a slab to be unmergeable during bootstrap.
74839 */
74840- if (s->refcount < 0)
74841+ if (atomic_read(&s->refcount) < 0)
74842 return 1;
74843
74844 return 0;
5e856224 74845@@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
df50ba0c
MT
74846 down_write(&slub_lock);
74847 s = find_mergeable(size, align, flags, name, ctor);
58c5fc13 74848 if (s) {
58c5fc13
MT
74849- s->refcount++;
74850+ atomic_inc(&s->refcount);
74851 /*
74852 * Adjust the object sizes so that we clear
74853 * the complete object on kzalloc.
5e856224 74854@@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
6892158b 74855 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
58c5fc13
MT
74856
74857 if (sysfs_slab_alias(s, name)) {
58c5fc13
MT
74858- s->refcount--;
74859+ atomic_dec(&s->refcount);
58c5fc13
MT
74860 goto err;
74861 }
6892158b 74862 up_write(&slub_lock);
5e856224 74863@@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
15a11c5b
MT
74864 }
74865 #endif
74866
74867-#ifdef CONFIG_SYSFS
74868+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74869 static int count_inuse(struct page *page)
74870 {
74871 return page->inuse;
5e856224 74872@@ -4429,12 +4474,12 @@ static void resiliency_test(void)
15a11c5b
MT
74873 validate_slab_cache(kmalloc_caches[9]);
74874 }
74875 #else
74876-#ifdef CONFIG_SYSFS
74877+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74878 static void resiliency_test(void) {};
74879 #endif
74880 #endif
74881
74882-#ifdef CONFIG_SYSFS
74883+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74884 enum slab_stat_type {
74885 SL_ALL, /* All slabs */
74886 SL_PARTIAL, /* Only partially allocated slabs */
5e856224 74887@@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
58c5fc13
MT
74888
74889 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74890 {
74891- return sprintf(buf, "%d\n", s->refcount - 1);
74892+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74893 }
74894 SLAB_ATTR_RO(aliases);
74895
5e856224 74896@@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
15a11c5b
MT
74897 return name;
74898 }
74899
74900+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74901 static int sysfs_slab_add(struct kmem_cache *s)
74902 {
74903 int err;
5e856224 74904@@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
15a11c5b
MT
74905 kobject_del(&s->kobj);
74906 kobject_put(&s->kobj);
74907 }
74908+#endif
74909
74910 /*
74911 * Need to buffer aliases during bootup until sysfs becomes
5e856224 74912@@ -5319,6 +5366,7 @@ struct saved_alias {
15a11c5b
MT
74913
74914 static struct saved_alias *alias_list;
74915
74916+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74917 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74918 {
74919 struct saved_alias *al;
5e856224 74920@@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
15a11c5b
MT
74921 alias_list = al;
74922 return 0;
74923 }
74924+#endif
74925
74926 static int __init slab_sysfs_init(void)
74927 {
5e856224
MT
74928diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74929index 1b7e22a..3fcd4f3 100644
74930--- a/mm/sparse-vmemmap.c
74931+++ b/mm/sparse-vmemmap.c
74932@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74933 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74934 if (!p)
74935 return NULL;
74936- pud_populate(&init_mm, pud, p);
74937+ pud_populate_kernel(&init_mm, pud, p);
74938 }
74939 return pud;
74940 }
74941@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74942 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74943 if (!p)
74944 return NULL;
74945- pgd_populate(&init_mm, pgd, p);
74946+ pgd_populate_kernel(&init_mm, pgd, p);
74947 }
74948 return pgd;
74949 }
fe2de317 74950diff --git a/mm/swap.c b/mm/swap.c
5e856224 74951index 14380e9..e244704 100644
fe2de317
MT
74952--- a/mm/swap.c
74953+++ b/mm/swap.c
5e856224 74954@@ -30,6 +30,7 @@
15a11c5b
MT
74955 #include <linux/backing-dev.h>
74956 #include <linux/memcontrol.h>
74957 #include <linux/gfp.h>
74958+#include <linux/hugetlb.h>
74959
74960 #include "internal.h"
74961
5e856224 74962@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
15a11c5b
MT
74963
74964 __page_cache_release(page);
74965 dtor = get_compound_page_dtor(page);
74966+ if (!PageHuge(page))
74967+ BUG_ON(dtor != free_compound_page);
74968 (*dtor)(page);
74969 }
74970
fe2de317 74971diff --git a/mm/swapfile.c b/mm/swapfile.c
5e856224 74972index f31b29d..8bdcae2 100644
fe2de317
MT
74973--- a/mm/swapfile.c
74974+++ b/mm/swapfile.c
4c928ab7 74975@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
8308f9c9
MT
74976
74977 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74978 /* Activity counter to indicate that a swapon or swapoff has occurred */
74979-static atomic_t proc_poll_event = ATOMIC_INIT(0);
74980+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74981
74982 static inline unsigned char swap_count(unsigned char ent)
74983 {
5e856224 74984@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
8308f9c9
MT
74985 }
74986 filp_close(swap_file, NULL);
74987 err = 0;
74988- atomic_inc(&proc_poll_event);
74989+ atomic_inc_unchecked(&proc_poll_event);
74990 wake_up_interruptible(&proc_poll_wait);
74991
74992 out_dput:
5e856224 74993@@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
8308f9c9
MT
74994
74995 poll_wait(file, &proc_poll_wait, wait);
74996
6e9df6a3
MT
74997- if (seq->poll_event != atomic_read(&proc_poll_event)) {
74998- seq->poll_event = atomic_read(&proc_poll_event);
74999+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
75000+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
8308f9c9
MT
75001 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
75002 }
75003
5e856224 75004@@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
6e9df6a3 75005 return ret;
8308f9c9 75006
6e9df6a3
MT
75007 seq = file->private_data;
75008- seq->poll_event = atomic_read(&proc_poll_event);
75009+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75010 return 0;
8308f9c9
MT
75011 }
75012
5e856224 75013@@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
66a7e928
MT
75014 (p->flags & SWP_DISCARDABLE) ? "D" : "");
75015
8308f9c9
MT
75016 mutex_unlock(&swapon_mutex);
75017- atomic_inc(&proc_poll_event);
75018+ atomic_inc_unchecked(&proc_poll_event);
75019 wake_up_interruptible(&proc_poll_wait);
75020
66a7e928 75021 if (S_ISREG(inode->i_mode))
fe2de317 75022diff --git a/mm/util.c b/mm/util.c
4c928ab7 75023index 136ac4f..f917fa9 100644
fe2de317
MT
75024--- a/mm/util.c
75025+++ b/mm/util.c
4c928ab7 75026@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
75027 void arch_pick_mmap_layout(struct mm_struct *mm)
75028 {
75029 mm->mmap_base = TASK_UNMAPPED_BASE;
75030+
75031+#ifdef CONFIG_PAX_RANDMMAP
75032+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75033+ mm->mmap_base += mm->delta_mmap;
75034+#endif
75035+
75036 mm->get_unmapped_area = arch_get_unmapped_area;
75037 mm->unmap_area = arch_unmap_area;
75038 }
fe2de317 75039diff --git a/mm/vmalloc.c b/mm/vmalloc.c
5e856224 75040index 86ce9a5..e0bd080 100644
fe2de317
MT
75041--- a/mm/vmalloc.c
75042+++ b/mm/vmalloc.c
75043@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
ae4e228f
MT
75044
75045 pte = pte_offset_kernel(pmd, addr);
75046 do {
75047- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75048- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75049+
75050+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75051+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75052+ BUG_ON(!pte_exec(*pte));
75053+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75054+ continue;
75055+ }
75056+#endif
75057+
75058+ {
75059+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75060+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75061+ }
75062 } while (pte++, addr += PAGE_SIZE, addr != end);
75063 }
75064
fe2de317 75065@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
75066 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75067 {
75068 pte_t *pte;
75069+ int ret = -ENOMEM;
58c5fc13
MT
75070
75071 /*
75072 * nr is a running index into the array which helps higher level
fe2de317 75073@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
75074 pte = pte_alloc_kernel(pmd, addr);
75075 if (!pte)
75076 return -ENOMEM;
75077+
ae4e228f 75078+ pax_open_kernel();
58c5fc13
MT
75079 do {
75080 struct page *page = pages[*nr];
75081
75082- if (WARN_ON(!pte_none(*pte)))
75083- return -EBUSY;
75084- if (WARN_ON(!page))
75085- return -ENOMEM;
ae4e228f 75086+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57199397 75087+ if (pgprot_val(prot) & _PAGE_NX)
ae4e228f
MT
75088+#endif
75089+
58c5fc13
MT
75090+ if (WARN_ON(!pte_none(*pte))) {
75091+ ret = -EBUSY;
75092+ goto out;
75093+ }
75094+ if (WARN_ON(!page)) {
75095+ ret = -ENOMEM;
75096+ goto out;
75097+ }
75098 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75099 (*nr)++;
75100 } while (pte++, addr += PAGE_SIZE, addr != end);
75101- return 0;
75102+ ret = 0;
75103+out:
ae4e228f
MT
75104+ pax_close_kernel();
75105+ return ret;
75106 }
75107
75108 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
5e856224
MT
75109@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75110 pmd_t *pmd;
75111 unsigned long next;
75112
75113- pmd = pmd_alloc(&init_mm, pud, addr);
75114+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
75115 if (!pmd)
75116 return -ENOMEM;
75117 do {
75118@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
75119 pud_t *pud;
75120 unsigned long next;
75121
75122- pud = pud_alloc(&init_mm, pgd, addr);
75123+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
75124 if (!pud)
75125 return -ENOMEM;
75126 do {
fe2de317 75127@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
ae4e228f
MT
75128 * and fall back on vmalloc() if that fails. Others
75129 * just put it in the vmalloc space.
75130 */
75131-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75132+#ifdef CONFIG_MODULES
75133+#ifdef MODULES_VADDR
75134 unsigned long addr = (unsigned long)x;
75135 if (addr >= MODULES_VADDR && addr < MODULES_END)
75136 return 1;
75137 #endif
58c5fc13 75138+
ae4e228f
MT
75139+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75140+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75141+ return 1;
58c5fc13
MT
75142+#endif
75143+
ae4e228f
MT
75144+#endif
75145+
75146 return is_vmalloc_addr(x);
58c5fc13
MT
75147 }
75148
fe2de317 75149@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
57199397
MT
75150
75151 if (!pgd_none(*pgd)) {
75152 pud_t *pud = pud_offset(pgd, addr);
75153+#ifdef CONFIG_X86
75154+ if (!pud_large(*pud))
75155+#endif
75156 if (!pud_none(*pud)) {
75157 pmd_t *pmd = pmd_offset(pud, addr);
75158+#ifdef CONFIG_X86
75159+ if (!pmd_large(*pmd))
75160+#endif
75161 if (!pmd_none(*pmd)) {
75162 pte_t *ptep, pte;
75163
5e856224 75164@@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
ae4e228f 75165 struct vm_struct *area;
58c5fc13
MT
75166
75167 BUG_ON(in_interrupt());
75168+
df50ba0c 75169+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
75170+ if (flags & VM_KERNEXEC) {
75171+ if (start != VMALLOC_START || end != VMALLOC_END)
75172+ return NULL;
df50ba0c
MT
75173+ start = (unsigned long)MODULES_EXEC_VADDR;
75174+ end = (unsigned long)MODULES_EXEC_END;
58c5fc13
MT
75175+ }
75176+#endif
75177+
75178 if (flags & VM_IOREMAP) {
75179 int bit = fls(size);
75180
5e856224 75181@@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
ae4e228f 75182 if (count > totalram_pages)
58c5fc13
MT
75183 return NULL;
75184
df50ba0c 75185+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
75186+ if (!(pgprot_val(prot) & _PAGE_NX))
75187+ flags |= VM_KERNEXEC;
75188+#endif
75189+
75190 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75191 __builtin_return_address(0));
75192 if (!area)
5e856224 75193@@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
ae4e228f 75194 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
4c928ab7 75195 goto fail;
58c5fc13 75196
df50ba0c 75197+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13 75198+ if (!(pgprot_val(prot) & _PAGE_NX))
6e9df6a3
MT
75199+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
75200+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
58c5fc13
MT
75201+ else
75202+#endif
75203+
6e9df6a3
MT
75204 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
75205 start, end, node, gfp_mask, caller);
4c928ab7 75206 if (!area)
5e856224 75207@@ -1825,10 +1887,9 @@ EXPORT_SYMBOL(vzalloc_node);
58c5fc13
MT
75208 * For tight control over page level allocator and protection flags
75209 * use __vmalloc() instead.
75210 */
75211-
58c5fc13
MT
75212 void *vmalloc_exec(unsigned long size)
75213 {
ae4e228f
MT
75214- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75215+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
58c5fc13
MT
75216 -1, __builtin_return_address(0));
75217 }
75218
5e856224 75219@@ -2123,6 +2184,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
6892158b
MT
75220 unsigned long uaddr = vma->vm_start;
75221 unsigned long usize = vma->vm_end - vma->vm_start;
75222
75223+ BUG_ON(vma->vm_mirror);
75224+
75225 if ((PAGE_SIZE-1) & (unsigned long)addr)
75226 return -EINVAL;
75227
fe2de317 75228diff --git a/mm/vmstat.c b/mm/vmstat.c
5e856224 75229index f600557..1459fc8 100644
fe2de317
MT
75230--- a/mm/vmstat.c
75231+++ b/mm/vmstat.c
bc901d79 75232@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
57199397
MT
75233 *
75234 * vm_stat contains the global counters
75235 */
4c928ab7
MT
75236-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75237+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
57199397
MT
75238 EXPORT_SYMBOL(vm_stat);
75239
75240 #ifdef CONFIG_SMP
66a7e928 75241@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
75242 v = p->vm_stat_diff[i];
75243 p->vm_stat_diff[i] = 0;
75244 local_irq_restore(flags);
75245- atomic_long_add(v, &zone->vm_stat[i]);
75246+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75247 global_diff[i] += v;
75248 #ifdef CONFIG_NUMA
75249 /* 3 seconds idle till flush */
66a7e928 75250@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
75251
75252 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75253 if (global_diff[i])
75254- atomic_long_add(global_diff[i], &vm_stat[i]);
75255+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75256 }
75257
75258 #endif
4c928ab7 75259@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
57199397
MT
75260 start_cpu_timer(cpu);
75261 #endif
75262 #ifdef CONFIG_PROC_FS
75263- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75264- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75265- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75266- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75267+ {
75268+ mode_t gr_mode = S_IRUGO;
75269+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75270+ gr_mode = S_IRUSR;
75271+#endif
75272+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75273+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
bc901d79
MT
75274+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75275+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75276+#else
57199397 75277+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
bc901d79 75278+#endif
57199397
MT
75279+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75280+ }
75281 #endif
75282 return 0;
75283 }
fe2de317 75284diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
5e856224 75285index efea35b..9c8dd0b 100644
fe2de317
MT
75286--- a/net/8021q/vlan.c
75287+++ b/net/8021q/vlan.c
5e856224 75288@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
df50ba0c
MT
75289 err = -EPERM;
75290 if (!capable(CAP_NET_ADMIN))
75291 break;
75292- if ((args.u.name_type >= 0) &&
75293- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75294+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75295 struct vlan_net *vn;
75296
75297 vn = net_generic(net, vlan_net_id);
fe2de317 75298diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
5e856224 75299index fccae26..e7ece2f 100644
fe2de317
MT
75300--- a/net/9p/trans_fd.c
75301+++ b/net/9p/trans_fd.c
5e856224 75302@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
6e9df6a3
MT
75303 oldfs = get_fs();
75304 set_fs(get_ds());
75305 /* The cast to a user pointer is valid due to the set_fs() */
75306- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75307+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75308 set_fs(oldfs);
75309
75310 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
fe2de317 75311diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
5e856224 75312index 876fbe8..8bbea9f 100644
fe2de317
MT
75313--- a/net/atm/atm_misc.c
75314+++ b/net/atm/atm_misc.c
75315@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
58c5fc13
MT
75316 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75317 return 1;
df50ba0c 75318 atm_return(vcc, truesize);
58c5fc13
MT
75319- atomic_inc(&vcc->stats->rx_drop);
75320+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75321 return 0;
75322 }
df50ba0c 75323 EXPORT_SYMBOL(atm_charge);
fe2de317 75324@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
58c5fc13
MT
75325 }
75326 }
df50ba0c 75327 atm_return(vcc, guess);
58c5fc13
MT
75328- atomic_inc(&vcc->stats->rx_drop);
75329+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75330 return NULL;
75331 }
df50ba0c
MT
75332 EXPORT_SYMBOL(atm_alloc_charge);
75333@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
58c5fc13 75334
df50ba0c 75335 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13
MT
75336 {
75337-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75338+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75339 __SONET_ITEMS
75340 #undef __HANDLE_ITEM
75341 }
df50ba0c 75342@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
58c5fc13 75343
df50ba0c 75344 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13 75345 {
df50ba0c 75346-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
58c5fc13
MT
75347+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75348 __SONET_ITEMS
75349 #undef __HANDLE_ITEM
75350 }
fe2de317
MT
75351diff --git a/net/atm/lec.h b/net/atm/lec.h
75352index dfc0719..47c5322 100644
75353--- a/net/atm/lec.h
75354+++ b/net/atm/lec.h
15a11c5b
MT
75355@@ -48,7 +48,7 @@ struct lane2_ops {
75356 const u8 *tlvs, u32 sizeoftlvs);
75357 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75358 const u8 *tlvs, u32 sizeoftlvs);
75359-};
75360+} __no_const;
75361
75362 /*
75363 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
fe2de317
MT
75364diff --git a/net/atm/mpc.h b/net/atm/mpc.h
75365index 0919a88..a23d54e 100644
75366--- a/net/atm/mpc.h
75367+++ b/net/atm/mpc.h
15a11c5b
MT
75368@@ -33,7 +33,7 @@ struct mpoa_client {
75369 struct mpc_parameters parameters; /* parameters for this client */
75370
75371 const struct net_device_ops *old_ops;
75372- struct net_device_ops new_ops;
75373+ net_device_ops_no_const new_ops;
75374 };
75375
75376
fe2de317
MT
75377diff --git a/net/atm/proc.c b/net/atm/proc.c
75378index 0d020de..011c7bb 100644
75379--- a/net/atm/proc.c
75380+++ b/net/atm/proc.c
75381@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
58c5fc13
MT
75382 const struct k_atm_aal_stats *stats)
75383 {
75384 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
df50ba0c
MT
75385- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
75386- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
75387- atomic_read(&stats->rx_drop));
75388+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75389+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75390+ atomic_read_unchecked(&stats->rx_drop));
58c5fc13
MT
75391 }
75392
75393 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
fe2de317
MT
75394diff --git a/net/atm/resources.c b/net/atm/resources.c
75395index 23f45ce..c748f1a 100644
75396--- a/net/atm/resources.c
75397+++ b/net/atm/resources.c
bc901d79 75398@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
58c5fc13
MT
75399 static void copy_aal_stats(struct k_atm_aal_stats *from,
75400 struct atm_aal_stats *to)
75401 {
75402-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75403+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75404 __AAL_STAT_ITEMS
75405 #undef __HANDLE_ITEM
75406 }
fe2de317 75407@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
58c5fc13
MT
75408 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75409 struct atm_aal_stats *to)
75410 {
75411-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75412+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75413 __AAL_STAT_ITEMS
75414 #undef __HANDLE_ITEM
75415 }
4c928ab7
MT
75416diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
75417index 3512e25..2b33401 100644
75418--- a/net/batman-adv/bat_iv_ogm.c
75419+++ b/net/batman-adv/bat_iv_ogm.c
75420@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75421
75422 /* change sequence number to network order */
75423 batman_ogm_packet->seqno =
75424- htonl((uint32_t)atomic_read(&hard_iface->seqno));
75425+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75426
75427 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
75428 batman_ogm_packet->tt_crc = htons((uint16_t)
75429@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75430 else
75431 batman_ogm_packet->gw_flags = NO_FLAGS;
75432
75433- atomic_inc(&hard_iface->seqno);
75434+ atomic_inc_unchecked(&hard_iface->seqno);
75435
75436 slide_own_bcast_window(hard_iface);
75437 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
75438@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
75439 return;
75440
75441 /* could be changed by schedule_own_packet() */
75442- if_incoming_seqno = atomic_read(&if_incoming->seqno);
75443+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
75444
75445 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
75446
fe2de317 75447diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
4c928ab7 75448index 7704df4..beb4e16 100644
fe2de317
MT
75449--- a/net/batman-adv/hard-interface.c
75450+++ b/net/batman-adv/hard-interface.c
4c928ab7 75451@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
66a7e928
MT
75452 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
75453 dev_add_pack(&hard_iface->batman_adv_ptype);
75454
75455- atomic_set(&hard_iface->seqno, 1);
75456- atomic_set(&hard_iface->frag_seqno, 1);
75457+ atomic_set_unchecked(&hard_iface->seqno, 1);
75458+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
75459 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
75460 hard_iface->net_dev->name);
75461
fe2de317 75462diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
5e856224 75463index 987c75a..20d6f36 100644
fe2de317
MT
75464--- a/net/batman-adv/soft-interface.c
75465+++ b/net/batman-adv/soft-interface.c
5e856224 75466@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
8308f9c9
MT
75467
75468 /* set broadcast sequence number */
75469 bcast_packet->seqno =
75470- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
75471+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
75472
6e9df6a3 75473 add_bcast_packet_to_list(bat_priv, skb, 1);
8308f9c9 75474
5e856224 75475@@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
8308f9c9
MT
75476 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
75477
75478 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
75479- atomic_set(&bat_priv->bcast_seqno, 1);
75480+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
6e9df6a3
MT
75481 atomic_set(&bat_priv->ttvn, 0);
75482 atomic_set(&bat_priv->tt_local_changes, 0);
75483 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
fe2de317 75484diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
5e856224 75485index e9eb043..d174eeb 100644
fe2de317
MT
75486--- a/net/batman-adv/types.h
75487+++ b/net/batman-adv/types.h
66a7e928 75488@@ -38,8 +38,8 @@ struct hard_iface {
8308f9c9
MT
75489 int16_t if_num;
75490 char if_status;
75491 struct net_device *net_dev;
75492- atomic_t seqno;
75493- atomic_t frag_seqno;
75494+ atomic_unchecked_t seqno;
75495+ atomic_unchecked_t frag_seqno;
75496 unsigned char *packet_buff;
75497 int packet_len;
75498 struct kobject *hardif_obj;
4c928ab7 75499@@ -154,7 +154,7 @@ struct bat_priv {
8308f9c9
MT
75500 atomic_t orig_interval; /* uint */
75501 atomic_t hop_penalty; /* uint */
75502 atomic_t log_level; /* uint */
75503- atomic_t bcast_seqno;
75504+ atomic_unchecked_t bcast_seqno;
75505 atomic_t bcast_queue_left;
75506 atomic_t batman_queue_left;
4c928ab7 75507 atomic_t ttvn; /* translation table version number */
fe2de317 75508diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
4c928ab7 75509index 07d1c1d..7e9bea9 100644
fe2de317
MT
75510--- a/net/batman-adv/unicast.c
75511+++ b/net/batman-adv/unicast.c
75512@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
66a7e928
MT
75513 frag1->flags = UNI_FRAG_HEAD | large_tail;
75514 frag2->flags = large_tail;
75515
75516- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
75517+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
75518 frag1->seqno = htons(seqno - 1);
75519 frag2->seqno = htons(seqno);
75520
fe2de317 75521diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
5e856224 75522index 280953b..cd219bb 100644
fe2de317
MT
75523--- a/net/bluetooth/hci_conn.c
75524+++ b/net/bluetooth/hci_conn.c
fe2de317 75525@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
6e9df6a3
MT
75526 memset(&cp, 0, sizeof(cp));
75527
75528 cp.handle = cpu_to_le16(conn->handle);
75529- memcpy(cp.ltk, ltk, sizeof(ltk));
75530+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
75531
75532 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
75533 }
4c928ab7 75534diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
5e856224 75535index 32d338c..d24bcdb 100644
4c928ab7
MT
75536--- a/net/bluetooth/l2cap_core.c
75537+++ b/net/bluetooth/l2cap_core.c
5e856224 75538@@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
4c928ab7
MT
75539 break;
75540
75541 case L2CAP_CONF_RFC:
75542- if (olen == sizeof(rfc))
75543- memcpy(&rfc, (void *)val, olen);
75544+ if (olen != sizeof(rfc))
75545+ break;
75546+
75547+ memcpy(&rfc, (void *)val, olen);
75548
75549 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
75550 rfc.mode != chan->mode)
5e856224 75551@@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4c928ab7
MT
75552
75553 switch (type) {
75554 case L2CAP_CONF_RFC:
75555- if (olen == sizeof(rfc))
75556- memcpy(&rfc, (void *)val, olen);
75557+ if (olen != sizeof(rfc))
75558+ break;
75559+
75560+ memcpy(&rfc, (void *)val, olen);
75561 goto done;
75562 }
75563 }
5e856224
MT
75564diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
75565index 5449294..7da9a5f 100644
75566--- a/net/bridge/netfilter/ebt_ulog.c
75567+++ b/net/bridge/netfilter/ebt_ulog.c
75568@@ -96,6 +96,7 @@ static void ulog_timer(unsigned long data)
75569 spin_unlock_bh(&ulog_buffers[data].lock);
75570 }
6892158b 75571
5e856224
MT
75572+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
75573 static struct sk_buff *ulog_alloc_skb(unsigned int size)
75574 {
75575 struct sk_buff *skb;
fe2de317 75576diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5e856224 75577index 5fe2ff3..10968b5 100644
fe2de317
MT
75578--- a/net/bridge/netfilter/ebtables.c
75579+++ b/net/bridge/netfilter/ebtables.c
5e856224 75580@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ae4e228f
MT
75581 tmp.valid_hooks = t->table->valid_hooks;
75582 }
75583 mutex_unlock(&ebt_mutex);
75584- if (copy_to_user(user, &tmp, *len) != 0){
75585+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
75586 BUGPRINT("c2u Didn't work\n");
75587 ret = -EFAULT;
75588 break;
fe2de317 75589diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
5e856224 75590index a97d97a..6f679ed 100644
fe2de317
MT
75591--- a/net/caif/caif_socket.c
75592+++ b/net/caif/caif_socket.c
15a11c5b 75593@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
8308f9c9
MT
75594 #ifdef CONFIG_DEBUG_FS
75595 struct debug_fs_counter {
75596 atomic_t caif_nr_socks;
15a11c5b 75597- atomic_t caif_sock_create;
8308f9c9
MT
75598- atomic_t num_connect_req;
75599- atomic_t num_connect_resp;
75600- atomic_t num_connect_fail_resp;
75601- atomic_t num_disconnect;
75602- atomic_t num_remote_shutdown_ind;
75603- atomic_t num_tx_flow_off_ind;
75604- atomic_t num_tx_flow_on_ind;
75605- atomic_t num_rx_flow_off;
75606- atomic_t num_rx_flow_on;
15a11c5b 75607+ atomic_unchecked_t caif_sock_create;
8308f9c9
MT
75608+ atomic_unchecked_t num_connect_req;
75609+ atomic_unchecked_t num_connect_resp;
75610+ atomic_unchecked_t num_connect_fail_resp;
75611+ atomic_unchecked_t num_disconnect;
75612+ atomic_unchecked_t num_remote_shutdown_ind;
75613+ atomic_unchecked_t num_tx_flow_off_ind;
75614+ atomic_unchecked_t num_tx_flow_on_ind;
75615+ atomic_unchecked_t num_rx_flow_off;
75616+ atomic_unchecked_t num_rx_flow_on;
75617 };
75618 static struct debug_fs_counter cnt;
15a11c5b
MT
75619 #define dbfs_atomic_inc(v) atomic_inc_return(v)
75620+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
75621 #define dbfs_atomic_dec(v) atomic_dec_return(v)
8308f9c9 75622 #else
15a11c5b 75623 #define dbfs_atomic_inc(v) 0
fe2de317 75624@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
15a11c5b
MT
75625 atomic_read(&cf_sk->sk.sk_rmem_alloc),
75626 sk_rcvbuf_lowwater(cf_sk));
8308f9c9
MT
75627 set_rx_flow_off(cf_sk);
75628- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75629+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75630 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75631 }
75632
fe2de317 75633@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9 75634 set_rx_flow_off(cf_sk);
15a11c5b
MT
75635 if (net_ratelimit())
75636 pr_debug("sending flow OFF due to rmem_schedule\n");
8308f9c9
MT
75637- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75638+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75639 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75640 }
75641 skb->dev = NULL;
fe2de317 75642@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
75643 switch (flow) {
75644 case CAIF_CTRLCMD_FLOW_ON_IND:
75645 /* OK from modem to start sending again */
75646- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
75647+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
75648 set_tx_flow_on(cf_sk);
75649 cf_sk->sk.sk_state_change(&cf_sk->sk);
75650 break;
75651
75652 case CAIF_CTRLCMD_FLOW_OFF_IND:
75653 /* Modem asks us to shut up */
75654- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
75655+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
75656 set_tx_flow_off(cf_sk);
75657 cf_sk->sk.sk_state_change(&cf_sk->sk);
75658 break;
fe2de317 75659@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9 75660 /* We're now connected */
15a11c5b
MT
75661 caif_client_register_refcnt(&cf_sk->layer,
75662 cfsk_hold, cfsk_put);
8308f9c9
MT
75663- dbfs_atomic_inc(&cnt.num_connect_resp);
75664+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
75665 cf_sk->sk.sk_state = CAIF_CONNECTED;
75666 set_tx_flow_on(cf_sk);
75667 cf_sk->sk.sk_state_change(&cf_sk->sk);
fe2de317 75668@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
75669
75670 case CAIF_CTRLCMD_INIT_FAIL_RSP:
75671 /* Connect request failed */
75672- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
75673+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
75674 cf_sk->sk.sk_err = ECONNREFUSED;
75675 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
75676 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
fe2de317 75677@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
75678
75679 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
75680 /* Modem has closed this connection, or device is down. */
75681- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
75682+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
75683 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75684 cf_sk->sk.sk_err = ECONNRESET;
75685 set_rx_flow_on(cf_sk);
fe2de317 75686@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
8308f9c9
MT
75687 return;
75688
75689 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
75690- dbfs_atomic_inc(&cnt.num_rx_flow_on);
75691+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
75692 set_rx_flow_on(cf_sk);
75693 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
75694 }
5e856224 75695@@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
8308f9c9
MT
75696 /*ifindex = id of the interface.*/
75697 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
75698
75699- dbfs_atomic_inc(&cnt.num_connect_req);
75700+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
75701 cf_sk->layer.receive = caif_sktrecv_cb;
15a11c5b
MT
75702
75703 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
5e856224 75704@@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
15a11c5b 75705 spin_unlock_bh(&sk->sk_receive_queue.lock);
8308f9c9
MT
75706 sock->sk = NULL;
75707
75708- dbfs_atomic_inc(&cnt.num_disconnect);
75709+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
75710
15a11c5b 75711 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
8308f9c9 75712 if (cf_sk->debugfs_socket_dir != NULL)
5e856224 75713@@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
15a11c5b
MT
75714 cf_sk->conn_req.protocol = protocol;
75715 /* Increase the number of sockets created. */
75716 dbfs_atomic_inc(&cnt.caif_nr_socks);
75717- num = dbfs_atomic_inc(&cnt.caif_sock_create);
75718+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
75719 #ifdef CONFIG_DEBUG_FS
75720 if (!IS_ERR(debugfsdir)) {
75721
fe2de317 75722diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
4c928ab7 75723index 5cf5222..6f704ad 100644
fe2de317
MT
75724--- a/net/caif/cfctrl.c
75725+++ b/net/caif/cfctrl.c
66a7e928
MT
75726@@ -9,6 +9,7 @@
75727 #include <linux/stddef.h>
75728 #include <linux/spinlock.h>
75729 #include <linux/slab.h>
75730+#include <linux/sched.h>
75731 #include <net/caif/caif_layer.h>
75732 #include <net/caif/cfpkt.h>
75733 #include <net/caif/cfctrl.h>
4c928ab7
MT
75734@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
75735 memset(&dev_info, 0, sizeof(dev_info));
8308f9c9 75736 dev_info.id = 0xff;
8308f9c9
MT
75737 cfsrvl_init(&this->serv, 0, &dev_info, false);
75738- atomic_set(&this->req_seq_no, 1);
75739- atomic_set(&this->rsp_seq_no, 1);
75740+ atomic_set_unchecked(&this->req_seq_no, 1);
75741+ atomic_set_unchecked(&this->rsp_seq_no, 1);
75742 this->serv.layer.receive = cfctrl_recv;
75743 sprintf(this->serv.layer.name, "ctrl");
75744 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
4c928ab7 75745@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
8308f9c9
MT
75746 struct cfctrl_request_info *req)
75747 {
15a11c5b 75748 spin_lock_bh(&ctrl->info_list_lock);
8308f9c9
MT
75749- atomic_inc(&ctrl->req_seq_no);
75750- req->sequence_no = atomic_read(&ctrl->req_seq_no);
75751+ atomic_inc_unchecked(&ctrl->req_seq_no);
75752+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
75753 list_add_tail(&req->list, &ctrl->list);
15a11c5b 75754 spin_unlock_bh(&ctrl->info_list_lock);
8308f9c9 75755 }
4c928ab7 75756@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
8308f9c9
MT
75757 if (p != first)
75758 pr_warn("Requests are not received in order\n");
75759
75760- atomic_set(&ctrl->rsp_seq_no,
75761+ atomic_set_unchecked(&ctrl->rsp_seq_no,
75762 p->sequence_no);
75763 list_del(&p->list);
75764 goto out;
4c928ab7
MT
75765diff --git a/net/can/gw.c b/net/can/gw.c
75766index 3d79b12..8de85fa 100644
75767--- a/net/can/gw.c
75768+++ b/net/can/gw.c
75769@@ -96,7 +96,7 @@ struct cf_mod {
75770 struct {
75771 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
75772 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
75773- } csumfunc;
75774+ } __no_const csumfunc;
75775 };
66a7e928 75776
66a7e928 75777
fe2de317 75778diff --git a/net/compat.c b/net/compat.c
4c928ab7 75779index 6def90e..c6992fa 100644
fe2de317
MT
75780--- a/net/compat.c
75781+++ b/net/compat.c
4c928ab7 75782@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
6e9df6a3
MT
75783 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75784 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75785 return -EFAULT;
75786- kmsg->msg_name = compat_ptr(tmp1);
75787- kmsg->msg_iov = compat_ptr(tmp2);
75788- kmsg->msg_control = compat_ptr(tmp3);
75789+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75790+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75791+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75792 return 0;
75793 }
75794
4c928ab7 75795@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
75796
75797 if (kern_msg->msg_namelen) {
75798 if (mode == VERIFY_READ) {
75799- int err = move_addr_to_kernel(kern_msg->msg_name,
75800+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
75801 kern_msg->msg_namelen,
75802 kern_address);
75803 if (err < 0)
4c928ab7 75804@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
75805 kern_msg->msg_name = NULL;
75806
75807 tot_len = iov_from_user_compat_to_kern(kern_iov,
75808- (struct compat_iovec __user *)kern_msg->msg_iov,
75809+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
75810 kern_msg->msg_iovlen);
75811 if (tot_len >= 0)
75812 kern_msg->msg_iov = kern_iov;
4c928ab7 75813@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
75814
75815 #define CMSG_COMPAT_FIRSTHDR(msg) \
75816 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75817- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75818+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75819 (struct compat_cmsghdr __user *)NULL)
75820
75821 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75822 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75823 (ucmlen) <= (unsigned long) \
75824 ((mhdr)->msg_controllen - \
75825- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75826+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75827
75828 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75829 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75830 {
75831 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75832- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75833+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75834 msg->msg_controllen)
75835 return NULL;
75836 return (struct compat_cmsghdr __user *)ptr;
4c928ab7 75837@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
6e9df6a3
MT
75838 {
75839 struct compat_timeval ctv;
75840 struct compat_timespec cts[3];
75841- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75842+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75843 struct compat_cmsghdr cmhdr;
75844 int cmlen;
75845
4c928ab7 75846@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
6e9df6a3
MT
75847
75848 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75849 {
75850- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75851+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75852 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75853 int fdnum = scm->fp->count;
75854 struct file **fp = scm->fp->fp;
4c928ab7 75855@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
6e9df6a3
MT
75856 return -EFAULT;
75857 old_fs = get_fs();
75858 set_fs(KERNEL_DS);
75859- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
75860+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
75861 set_fs(old_fs);
75862
75863 return err;
4c928ab7 75864@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
6e9df6a3
MT
75865 len = sizeof(ktime);
75866 old_fs = get_fs();
75867 set_fs(KERNEL_DS);
75868- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75869+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75870 set_fs(old_fs);
75871
75872 if (!err) {
4c928ab7 75873@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75874 case MCAST_JOIN_GROUP:
75875 case MCAST_LEAVE_GROUP:
75876 {
75877- struct compat_group_req __user *gr32 = (void *)optval;
75878+ struct compat_group_req __user *gr32 = (void __user *)optval;
75879 struct group_req __user *kgr =
75880 compat_alloc_user_space(sizeof(struct group_req));
75881 u32 interface;
4c928ab7 75882@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75883 case MCAST_BLOCK_SOURCE:
75884 case MCAST_UNBLOCK_SOURCE:
75885 {
75886- struct compat_group_source_req __user *gsr32 = (void *)optval;
75887+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75888 struct group_source_req __user *kgsr = compat_alloc_user_space(
75889 sizeof(struct group_source_req));
75890 u32 interface;
4c928ab7 75891@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75892 }
75893 case MCAST_MSFILTER:
75894 {
75895- struct compat_group_filter __user *gf32 = (void *)optval;
75896+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75897 struct group_filter __user *kgf;
75898 u32 interface, fmode, numsrc;
75899
4c928ab7 75900@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75901 char __user *optval, int __user *optlen,
75902 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75903 {
75904- struct compat_group_filter __user *gf32 = (void *)optval;
75905+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75906 struct group_filter __user *kgf;
75907 int __user *koptlen;
75908 u32 interface, fmode, numsrc;
fe2de317 75909diff --git a/net/core/datagram.c b/net/core/datagram.c
4c928ab7 75910index 68bbf9f..5ef0d12 100644
fe2de317
MT
75911--- a/net/core/datagram.c
75912+++ b/net/core/datagram.c
75913@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
8308f9c9
MT
75914 }
75915
75916 kfree_skb(skb);
75917- atomic_inc(&sk->sk_drops);
75918+ atomic_inc_unchecked(&sk->sk_drops);
75919 sk_mem_reclaim_partial(sk);
75920
75921 return err;
fe2de317 75922diff --git a/net/core/dev.c b/net/core/dev.c
5e856224 75923index 0336374..659088a 100644
fe2de317
MT
75924--- a/net/core/dev.c
75925+++ b/net/core/dev.c
5e856224 75926@@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
16454cff
MT
75927 if (no_module && capable(CAP_NET_ADMIN))
75928 no_module = request_module("netdev-%s", name);
75929 if (no_module && capable(CAP_SYS_MODULE)) {
71d190be
MT
75930+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75931+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
75932+#else
75933 if (!request_module("%s", name))
16454cff
MT
75934 pr_err("Loading kernel module for a network device "
75935 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75936 "instead\n", name);
71d190be
MT
75937+#endif
75938 }
75939 }
75940 EXPORT_SYMBOL(dev_load);
5e856224 75941@@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
4c928ab7
MT
75942 {
75943 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
75944 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
75945- atomic_long_inc(&dev->rx_dropped);
75946+ atomic_long_inc_unchecked(&dev->rx_dropped);
75947 kfree_skb(skb);
75948 return NET_RX_DROP;
75949 }
5e856224 75950@@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
4c928ab7
MT
75951 nf_reset(skb);
75952
75953 if (unlikely(!is_skb_forwardable(dev, skb))) {
75954- atomic_long_inc(&dev->rx_dropped);
75955+ atomic_long_inc_unchecked(&dev->rx_dropped);
75956 kfree_skb(skb);
75957 return NET_RX_DROP;
75958 }
5e856224 75959@@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
66a7e928 75960
15a11c5b
MT
75961 struct dev_gso_cb {
75962 void (*destructor)(struct sk_buff *skb);
75963-};
75964+} __no_const;
66a7e928 75965
15a11c5b
MT
75966 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75967
5e856224 75968@@ -2933,7 +2937,7 @@ enqueue:
4c928ab7
MT
75969
75970 local_irq_restore(flags);
75971
75972- atomic_long_inc(&skb->dev->rx_dropped);
75973+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75974 kfree_skb(skb);
75975 return NET_RX_DROP;
75976 }
5e856224 75977@@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb)
ae4e228f
MT
75978 }
75979 EXPORT_SYMBOL(netif_rx_ni);
75980
75981-static void net_tx_action(struct softirq_action *h)
75982+static void net_tx_action(void)
75983 {
75984 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75985
5e856224 75986@@ -3293,7 +3297,7 @@ ncls:
4c928ab7
MT
75987 if (pt_prev) {
75988 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75989 } else {
75990- atomic_long_inc(&skb->dev->rx_dropped);
75991+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75992 kfree_skb(skb);
75993 /* Jamal, now you will not able to escape explaining
75994 * me how you were going to use this. :-)
5e856224 75995@@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi)
57199397 75996 }
ae4e228f
MT
75997 EXPORT_SYMBOL(netif_napi_del);
75998
ae4e228f
MT
75999-static void net_rx_action(struct softirq_action *h)
76000+static void net_rx_action(void)
76001 {
57199397 76002 struct softnet_data *sd = &__get_cpu_var(softnet_data);
ae4e228f 76003 unsigned long time_limit = jiffies + 2;
5e856224 76004@@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4c928ab7
MT
76005 } else {
76006 netdev_stats_to_stats64(storage, &dev->stats);
76007 }
76008- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
76009+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
76010 return storage;
76011 }
76012 EXPORT_SYMBOL(dev_get_stats);
fe2de317 76013diff --git a/net/core/flow.c b/net/core/flow.c
4c928ab7 76014index e318c7e..168b1d0 100644
fe2de317
MT
76015--- a/net/core/flow.c
76016+++ b/net/core/flow.c
6e9df6a3 76017@@ -61,7 +61,7 @@ struct flow_cache {
8308f9c9
MT
76018 struct timer_list rnd_timer;
76019 };
76020
76021-atomic_t flow_cache_genid = ATOMIC_INIT(0);
76022+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
76023 EXPORT_SYMBOL(flow_cache_genid);
76024 static struct flow_cache flow_cache_global;
76025 static struct kmem_cache *flow_cachep __read_mostly;
fe2de317 76026@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
8308f9c9
MT
76027
76028 static int flow_entry_valid(struct flow_cache_entry *fle)
76029 {
76030- if (atomic_read(&flow_cache_genid) != fle->genid)
76031+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
76032 return 0;
76033 if (fle->object && !fle->object->ops->check(fle->object))
76034 return 0;
fe2de317 76035@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
8308f9c9
MT
76036 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
76037 fcp->hash_count++;
76038 }
76039- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
76040+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
76041 flo = fle->object;
76042 if (!flo)
76043 goto ret_object;
6e9df6a3 76044@@ -280,7 +280,7 @@ nocache:
8308f9c9
MT
76045 }
76046 flo = resolver(net, key, family, dir, flo, ctx);
76047 if (fle) {
76048- fle->genid = atomic_read(&flow_cache_genid);
76049+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
76050 if (!IS_ERR(flo))
76051 fle->object = flo;
76052 else
fe2de317
MT
76053diff --git a/net/core/iovec.c b/net/core/iovec.c
76054index c40f27e..7f49254 100644
76055--- a/net/core/iovec.c
76056+++ b/net/core/iovec.c
76057@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
6e9df6a3
MT
76058 if (m->msg_namelen) {
76059 if (mode == VERIFY_READ) {
76060 void __user *namep;
76061- namep = (void __user __force *) m->msg_name;
76062+ namep = (void __force_user *) m->msg_name;
76063 err = move_addr_to_kernel(namep, m->msg_namelen,
76064 address);
76065 if (err < 0)
fe2de317 76066@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
6e9df6a3
MT
76067 }
76068
76069 size = m->msg_iovlen * sizeof(struct iovec);
76070- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
76071+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
76072 return -EFAULT;
76073
76074 m->msg_iov = iov;
fe2de317 76075diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
5e856224 76076index 5c30296..ebe7b61 100644
fe2de317
MT
76077--- a/net/core/rtnetlink.c
76078+++ b/net/core/rtnetlink.c
6e9df6a3 76079@@ -57,7 +57,7 @@ struct rtnl_link {
15a11c5b
MT
76080 rtnl_doit_func doit;
76081 rtnl_dumpit_func dumpit;
6e9df6a3 76082 rtnl_calcit_func calcit;
15a11c5b
MT
76083-};
76084+} __no_const;
76085
76086 static DEFINE_MUTEX(rtnl_mutex);
5e856224 76087
fe2de317 76088diff --git a/net/core/scm.c b/net/core/scm.c
4c928ab7 76089index ff52ad0..aff1c0f 100644
fe2de317
MT
76090--- a/net/core/scm.c
76091+++ b/net/core/scm.c
4c928ab7 76092@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
6e9df6a3
MT
76093 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76094 {
76095 struct cmsghdr __user *cm
76096- = (__force struct cmsghdr __user *)msg->msg_control;
76097+ = (struct cmsghdr __force_user *)msg->msg_control;
76098 struct cmsghdr cmhdr;
76099 int cmlen = CMSG_LEN(len);
76100 int err;
4c928ab7 76101@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
6e9df6a3
MT
76102 err = -EFAULT;
76103 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
76104 goto out;
76105- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
76106+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
76107 goto out;
76108 cmlen = CMSG_SPACE(len);
76109 if (msg->msg_controllen < cmlen)
4c928ab7 76110@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
6e9df6a3
MT
76111 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76112 {
76113 struct cmsghdr __user *cm
76114- = (__force struct cmsghdr __user*)msg->msg_control;
76115+ = (struct cmsghdr __force_user *)msg->msg_control;
76116
76117 int fdmax = 0;
76118 int fdnum = scm->fp->count;
4c928ab7 76119@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
6e9df6a3
MT
76120 if (fdnum < fdmax)
76121 fdmax = fdnum;
76122
76123- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
76124+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
76125 i++, cmfptr++)
76126 {
76127 int new_fd;
fe2de317 76128diff --git a/net/core/sock.c b/net/core/sock.c
5e856224 76129index 02f8dfe..86dfd4a 100644
fe2de317
MT
76130--- a/net/core/sock.c
76131+++ b/net/core/sock.c
5e856224 76132@@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
4c928ab7
MT
76133 struct sk_buff_head *list = &sk->sk_receive_queue;
76134
76135 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
8308f9c9
MT
76136- atomic_inc(&sk->sk_drops);
76137+ atomic_inc_unchecked(&sk->sk_drops);
6e9df6a3 76138 trace_sock_rcvqueue_full(sk, skb);
8308f9c9
MT
76139 return -ENOMEM;
76140 }
5e856224 76141@@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
76142 return err;
76143
76144 if (!sk_rmem_schedule(sk, skb->truesize)) {
76145- atomic_inc(&sk->sk_drops);
76146+ atomic_inc_unchecked(&sk->sk_drops);
76147 return -ENOBUFS;
76148 }
76149
5e856224 76150@@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
76151 skb_dst_force(skb);
76152
76153 spin_lock_irqsave(&list->lock, flags);
76154- skb->dropcount = atomic_read(&sk->sk_drops);
76155+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76156 __skb_queue_tail(list, skb);
76157 spin_unlock_irqrestore(&list->lock, flags);
76158
5e856224 76159@@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
76160 skb->dev = NULL;
76161
76162 if (sk_rcvqueues_full(sk, skb)) {
76163- atomic_inc(&sk->sk_drops);
76164+ atomic_inc_unchecked(&sk->sk_drops);
76165 goto discard_and_relse;
76166 }
76167 if (nested)
5e856224 76168@@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
76169 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76170 } else if (sk_add_backlog(sk, skb)) {
76171 bh_unlock_sock(sk);
76172- atomic_inc(&sk->sk_drops);
76173+ atomic_inc_unchecked(&sk->sk_drops);
76174 goto discard_and_relse;
76175 }
76176
5e856224 76177@@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
15a11c5b
MT
76178 if (len > sizeof(peercred))
76179 len = sizeof(peercred);
76180 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76181- if (copy_to_user(optval, &peercred, len))
76182+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76183 return -EFAULT;
76184 goto lenout;
76185 }
5e856224 76186@@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
76187 return -ENOTCONN;
76188 if (lv < len)
76189 return -EINVAL;
76190- if (copy_to_user(optval, address, len))
76191+ if (len > sizeof(address) || copy_to_user(optval, address, len))
76192 return -EFAULT;
76193 goto lenout;
76194 }
5e856224 76195@@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
76196
76197 if (len > lv)
76198 len = lv;
76199- if (copy_to_user(optval, &v, len))
76200+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
76201 return -EFAULT;
76202 lenout:
76203 if (put_user(len, optlen))
5e856224 76204@@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
8308f9c9
MT
76205 */
76206 smp_wmb();
76207 atomic_set(&sk->sk_refcnt, 1);
76208- atomic_set(&sk->sk_drops, 0);
76209+ atomic_set_unchecked(&sk->sk_drops, 0);
76210 }
76211 EXPORT_SYMBOL(sock_init_data);
76212
5e856224
MT
76213diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
76214index b9868e1..849f809 100644
76215--- a/net/core/sock_diag.c
76216+++ b/net/core/sock_diag.c
76217@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
76218
76219 int sock_diag_check_cookie(void *sk, __u32 *cookie)
76220 {
76221+#ifndef CONFIG_GRKERNSEC_HIDESYM
76222 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
76223 cookie[1] != INET_DIAG_NOCOOKIE) &&
76224 ((u32)(unsigned long)sk != cookie[0] ||
76225 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
76226 return -ESTALE;
76227 else
76228+#endif
76229 return 0;
76230 }
76231 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
76232
76233 void sock_diag_save_cookie(void *sk, __u32 *cookie)
76234 {
76235+#ifdef CONFIG_GRKERNSEC_HIDESYM
76236+ cookie[0] = 0;
76237+ cookie[1] = 0;
76238+#else
76239 cookie[0] = (u32)(unsigned long)sk;
76240 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
76241+#endif
76242 }
76243 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
76244
fe2de317
MT
76245diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
76246index 02e75d1..9a57a7c 100644
76247--- a/net/decnet/sysctl_net_decnet.c
76248+++ b/net/decnet/sysctl_net_decnet.c
76249@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
ae4e228f
MT
76250
76251 if (len > *lenp) len = *lenp;
76252
76253- if (copy_to_user(buffer, addr, len))
bc901d79 76254+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
ae4e228f
MT
76255 return -EFAULT;
76256
76257 *lenp = len;
fe2de317 76258@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
ae4e228f
MT
76259
76260 if (len > *lenp) len = *lenp;
76261
76262- if (copy_to_user(buffer, devname, len))
bc901d79 76263+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
ae4e228f
MT
76264 return -EFAULT;
76265
76266 *lenp = len;
fe2de317
MT
76267diff --git a/net/econet/Kconfig b/net/econet/Kconfig
76268index 39a2d29..f39c0fe 100644
76269--- a/net/econet/Kconfig
76270+++ b/net/econet/Kconfig
bc901d79
MT
76271@@ -4,7 +4,7 @@
76272
76273 config ECONET
76274 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
76275- depends on EXPERIMENTAL && INET
76276+ depends on EXPERIMENTAL && INET && BROKEN
76277 ---help---
76278 Econet is a fairly old and slow networking protocol mainly used by
76279 Acorn computers to access file and print servers. It uses native
4c928ab7
MT
76280diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
76281index 36d1440..44ff28b 100644
76282--- a/net/ipv4/ah4.c
76283+++ b/net/ipv4/ah4.c
76284@@ -19,6 +19,8 @@ struct ah_skb_cb {
76285 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76286
76287 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76288+ unsigned int size) __size_overflow(3);
76289+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76290 unsigned int size)
76291 {
76292 unsigned int len;
fe2de317
MT
76293diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
76294index 92fc5f6..b790d91 100644
76295--- a/net/ipv4/fib_frontend.c
76296+++ b/net/ipv4/fib_frontend.c
76297@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
66a7e928
MT
76298 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76299 fib_sync_up(dev);
76300 #endif
76301- atomic_inc(&net->ipv4.dev_addr_genid);
76302+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76303 rt_cache_flush(dev_net(dev), -1);
76304 break;
76305 case NETDEV_DOWN:
76306 fib_del_ifaddr(ifa, NULL);
76307- atomic_inc(&net->ipv4.dev_addr_genid);
76308+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76309 if (ifa->ifa_dev->ifa_list == NULL) {
76310 /* Last address was deleted from this interface.
76311 * Disable IP.
fe2de317 76312@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
66a7e928
MT
76313 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76314 fib_sync_up(dev);
76315 #endif
76316- atomic_inc(&net->ipv4.dev_addr_genid);
76317+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76318 rt_cache_flush(dev_net(dev), -1);
76319 break;
76320 case NETDEV_DOWN:
fe2de317
MT
76321diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
76322index 80106d8..232e898 100644
76323--- a/net/ipv4/fib_semantics.c
76324+++ b/net/ipv4/fib_semantics.c
76325@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
66a7e928
MT
76326 nh->nh_saddr = inet_select_addr(nh->nh_dev,
76327 nh->nh_gw,
76328 nh->nh_parent->fib_scope);
76329- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
76330+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
76331
76332 return nh->nh_saddr;
76333 }
fe2de317
MT
76334diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
76335index 984ec65..97ac518 100644
76336--- a/net/ipv4/inet_hashtables.c
76337+++ b/net/ipv4/inet_hashtables.c
15a11c5b 76338@@ -18,12 +18,15 @@
58c5fc13
MT
76339 #include <linux/sched.h>
76340 #include <linux/slab.h>
76341 #include <linux/wait.h>
76342+#include <linux/security.h>
76343
76344 #include <net/inet_connection_sock.h>
76345 #include <net/inet_hashtables.h>
15a11c5b 76346 #include <net/secure_seq.h>
58c5fc13
MT
76347 #include <net/ip.h>
76348
76349+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
76350+
76351 /*
76352 * Allocate and initialize a new local port bind bucket.
76353 * The bindhash mutex for snum's hash chain must be held here.
15a11c5b 76354@@ -530,6 +533,8 @@ ok:
ae4e228f 76355 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
58c5fc13
MT
76356 spin_unlock(&head->lock);
76357
76358+ gr_update_task_in_ip_table(current, inet_sk(sk));
76359+
76360 if (tw) {
76361 inet_twsk_deschedule(tw, death_row);
ae4e228f 76362 while (twrefcnt) {
fe2de317 76363diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
5e856224 76364index d4d61b6..b81aec8 100644
fe2de317
MT
76365--- a/net/ipv4/inetpeer.c
76366+++ b/net/ipv4/inetpeer.c
5e856224 76367@@ -487,8 +487,8 @@ relookup:
6892158b 76368 if (p) {
16454cff 76369 p->daddr = *daddr;
6892158b
MT
76370 atomic_set(&p->refcnt, 1);
76371- atomic_set(&p->rid, 0);
6e9df6a3 76372- atomic_set(&p->ip_id_count,
6892158b 76373+ atomic_set_unchecked(&p->rid, 0);
6e9df6a3
MT
76374+ atomic_set_unchecked(&p->ip_id_count,
76375 (daddr->family == AF_INET) ?
76376 secure_ip_id(daddr->addr.a4) :
76377 secure_ipv6_id(daddr->addr.a6));
fe2de317 76378diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
5e856224 76379index 1f23a57..7180dfe 100644
fe2de317
MT
76380--- a/net/ipv4/ip_fragment.c
76381+++ b/net/ipv4/ip_fragment.c
76382@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
6892158b
MT
76383 return 0;
76384
76385 start = qp->rid;
76386- end = atomic_inc_return(&peer->rid);
76387+ end = atomic_inc_return_unchecked(&peer->rid);
76388 qp->rid = end;
76389
76390 rc = qp->q.fragments && (end - start) > max;
fe2de317 76391diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
5e856224 76392index 8aa87c1..35c3248 100644
fe2de317
MT
76393--- a/net/ipv4/ip_sockglue.c
76394+++ b/net/ipv4/ip_sockglue.c
5e856224 76395@@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
15a11c5b
MT
76396 len = min_t(unsigned int, len, opt->optlen);
76397 if (put_user(len, optlen))
76398 return -EFAULT;
76399- if (copy_to_user(optval, opt->__data, len))
76400+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76401+ copy_to_user(optval, opt->__data, len))
76402 return -EFAULT;
76403 return 0;
76404 }
5e856224 76405@@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
6e9df6a3
MT
76406 if (sk->sk_type != SOCK_STREAM)
76407 return -ENOPROTOOPT;
76408
76409- msg.msg_control = optval;
76410+ msg.msg_control = (void __force_kernel *)optval;
76411 msg.msg_controllen = len;
76412 msg.msg_flags = flags;
76413
fe2de317 76414diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
5e856224 76415index 6e412a6..6640538 100644
fe2de317
MT
76416--- a/net/ipv4/ipconfig.c
76417+++ b/net/ipv4/ipconfig.c
4c928ab7 76418@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
fe2de317
MT
76419
76420 mm_segment_t oldfs = get_fs();
76421 set_fs(get_ds());
76422- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76423+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76424 set_fs(oldfs);
76425 return res;
76426 }
4c928ab7 76427@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
fe2de317
MT
76428
76429 mm_segment_t oldfs = get_fs();
76430 set_fs(get_ds());
76431- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76432+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76433 set_fs(oldfs);
76434 return res;
76435 }
4c928ab7 76436@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
fe2de317
MT
76437
76438 mm_segment_t oldfs = get_fs();
76439 set_fs(get_ds());
76440- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76441+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76442 set_fs(oldfs);
76443 return res;
76444 }
4c928ab7 76445diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
5e856224 76446index fd7a3f6..a1b1013 100644
4c928ab7
MT
76447--- a/net/ipv4/netfilter/arp_tables.c
76448+++ b/net/ipv4/netfilter/arp_tables.c
5e856224
MT
76449@@ -757,6 +757,9 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76450
76451 static int copy_entries_to_user(unsigned int total_size,
76452 const struct xt_table *table,
76453+ void __user *userptr) __size_overflow(1);
76454+static int copy_entries_to_user(unsigned int total_size,
76455+ const struct xt_table *table,
76456 void __user *userptr)
76457 {
76458 unsigned int off, num;
76459@@ -984,6 +987,11 @@ static int __do_replace(struct net *net, const char *name,
4c928ab7
MT
76460 unsigned int valid_hooks,
76461 struct xt_table_info *newinfo,
76462 unsigned int num_counters,
76463+ void __user *counters_ptr) __size_overflow(5);
76464+static int __do_replace(struct net *net, const char *name,
76465+ unsigned int valid_hooks,
76466+ struct xt_table_info *newinfo,
76467+ unsigned int num_counters,
76468 void __user *counters_ptr)
76469 {
76470 int ret;
5e856224 76471@@ -1104,6 +1112,8 @@ static int do_replace(struct net *net, const void __user *user,
4c928ab7
MT
76472 }
76473
76474 static int do_add_counters(struct net *net, const void __user *user,
76475+ unsigned int len, int compat) __size_overflow(3);
76476+static int do_add_counters(struct net *net, const void __user *user,
76477 unsigned int len, int compat)
76478 {
76479 unsigned int i, curcpu;
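The arptables hunk above (and the iptables and ip6tables hunks that follow) adds a second, otherwise redundant forward declaration whose only purpose is to carry a __size_overflow(n) annotation, telling grsecurity's size_overflow GCC plugin which (1-based) parameter is a size to be checked for overflow; the function bodies themselves are untouched. A minimal sketch of the idiom with a hypothetical copy_counters() helper; the fallback macro only keeps the sketch compiling when the plugin's attribute is not available:

#include <string.h>

#ifndef __size_overflow
#define __size_overflow(...)	/* the real definition is added elsewhere in this patch */
#endif

/* Redundant declaration carrying the annotation: parameter 3 is the size. */
static int copy_counters(void *dst, const void *src,
			 unsigned int size) __size_overflow(3);
static int copy_counters(void *dst, const void *src, unsigned int size)
{
	memcpy(dst, src, size);	/* "size" is the argument the plugin instruments */
	return 0;
}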
76480diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
5e856224 76481index 24e556e..b073356 100644
4c928ab7
MT
76482--- a/net/ipv4/netfilter/ip_tables.c
76483+++ b/net/ipv4/netfilter/ip_tables.c
5e856224
MT
76484@@ -923,6 +923,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76485 static int
76486 copy_entries_to_user(unsigned int total_size,
76487 const struct xt_table *table,
76488+ void __user *userptr) __size_overflow(1);
76489+static int
76490+copy_entries_to_user(unsigned int total_size,
76491+ const struct xt_table *table,
76492 void __user *userptr)
76493 {
76494 unsigned int off, num;
76495@@ -1172,6 +1176,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
4c928ab7
MT
76496 static int
76497 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76498 struct xt_table_info *newinfo, unsigned int num_counters,
76499+ void __user *counters_ptr) __size_overflow(5);
76500+static int
76501+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76502+ struct xt_table_info *newinfo, unsigned int num_counters,
76503 void __user *counters_ptr)
76504 {
76505 int ret;
5e856224 76506@@ -1293,6 +1301,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
4c928ab7
MT
76507
76508 static int
76509 do_add_counters(struct net *net, const void __user *user,
76510+ unsigned int len, int compat) __size_overflow(3);
76511+static int
76512+do_add_counters(struct net *net, const void __user *user,
76513 unsigned int len, int compat)
76514 {
76515 unsigned int i, curcpu;
5e856224
MT
76516diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
76517index ba5756d..8d34d74 100644
76518--- a/net/ipv4/netfilter/ipt_ULOG.c
76519+++ b/net/ipv4/netfilter/ipt_ULOG.c
76520@@ -125,6 +125,7 @@ static void ulog_timer(unsigned long data)
76521 spin_unlock_bh(&ulog_lock);
76522 }
76523
76524+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
76525 static struct sk_buff *ulog_alloc_skb(unsigned int size)
76526 {
76527 struct sk_buff *skb;
fe2de317 76528diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
4c928ab7 76529index 2133c30..0e8047e 100644
fe2de317
MT
76530--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
76531+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
4c928ab7
MT
76532@@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
76533 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76534 unsigned char *eoc,
76535 unsigned long **oid,
76536+ unsigned int *len) __size_overflow(2);
76537+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76538+ unsigned char *eoc,
76539+ unsigned long **oid,
76540 unsigned int *len)
76541 {
76542 unsigned long subid;
fe2de317 76543diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
5e856224 76544index b072386..abdebcf 100644
fe2de317
MT
76545--- a/net/ipv4/ping.c
76546+++ b/net/ipv4/ping.c
5e856224 76547@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
15a11c5b
MT
76548 sk_rmem_alloc_get(sp),
76549 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76550 atomic_read(&sp->sk_refcnt), sp,
76551- atomic_read(&sp->sk_drops), len);
76552+ atomic_read_unchecked(&sp->sk_drops), len);
76553 }
76554
76555 static int ping_seq_show(struct seq_file *seq, void *v)
fe2de317 76556diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
5e856224 76557index 3ccda5a..3c1e61d 100644
fe2de317
MT
76558--- a/net/ipv4/raw.c
76559+++ b/net/ipv4/raw.c
5e856224 76560@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
8308f9c9
MT
76561 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76562 {
76563 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76564- atomic_inc(&sk->sk_drops);
76565+ atomic_inc_unchecked(&sk->sk_drops);
76566 kfree_skb(skb);
76567 return NET_RX_DROP;
76568 }
5e856224 76569@@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
71d190be
MT
76570
76571 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76572 {
76573+ struct icmp_filter filter;
76574+
76575 if (optlen > sizeof(struct icmp_filter))
76576 optlen = sizeof(struct icmp_filter);
76577- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76578+ if (copy_from_user(&filter, optval, optlen))
76579 return -EFAULT;
15a11c5b 76580+ raw_sk(sk)->filter = filter;
71d190be
MT
76581 return 0;
76582 }
76583
76584 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76585 {
71d190be 76586 int len, ret = -EFAULT;
15a11c5b 76587+ struct icmp_filter filter;
71d190be
MT
76588
76589 if (get_user(len, optlen))
15a11c5b 76590 goto out;
5e856224 76591@@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
71d190be
MT
76592 if (len > sizeof(struct icmp_filter))
76593 len = sizeof(struct icmp_filter);
76594 ret = -EFAULT;
15a11c5b 76595- if (put_user(len, optlen) ||
71d190be 76596- copy_to_user(optval, &raw_sk(sk)->filter, len))
15a11c5b 76597+ filter = raw_sk(sk)->filter;
6e9df6a3 76598+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
71d190be
MT
76599 goto out;
76600 ret = 0;
76601 out: return ret;
5e856224 76602@@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
8308f9c9
MT
76603 sk_wmem_alloc_get(sp),
76604 sk_rmem_alloc_get(sp),
76605 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76606- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66a7e928
MT
76607+ atomic_read(&sp->sk_refcnt),
76608+#ifdef CONFIG_GRKERNSEC_HIDESYM
76609+ NULL,
76610+#else
76611+ sp,
76612+#endif
76613+ atomic_read_unchecked(&sp->sk_drops));
8308f9c9
MT
76614 }
76615
76616 static int raw_seq_show(struct seq_file *seq, void *v)
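The raw.c hunks above stop copying directly between user space and raw_sk(sk)->filter: the setter clamps the user-supplied length and stages the data in a stack variable before assigning it, and the getter snapshots the filter and refuses lengths larger than that snapshot. A minimal sketch of this clamp-and-bounce pattern; struct my_filter, struct my_sock, set_filter() and get_filter() are hypothetical stand-ins, not the kernel's types:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct my_filter { unsigned int data; };	/* stand-in for struct icmp_filter */
struct my_sock { struct my_filter filter; };	/* stand-in for struct raw_sock */

static int set_filter(struct my_sock *msk, const char __user *optval, int optlen)
{
	struct my_filter tmp;

	if (optlen > sizeof(tmp))		/* clamp to the destination size */
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;
	msk->filter = tmp;			/* publish only after a bounded copy */
	return 0;
}

static int get_filter(struct my_sock *msk, char __user *optval, int len)
{
	struct my_filter tmp = msk->filter;	/* snapshot the kernel-side value */

	if (len > sizeof(tmp) ||		/* refuse lengths past the snapshot */
	    copy_to_user(optval, &tmp, len))
		return -EFAULT;
	return 0;
}

The same bounded-copy idea reappears below in tcp_probe.c and rc80211_pid_debugfs.c, where the length is checked against the size of the stack buffer before copy_to_user().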
fe2de317 76617diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5e856224 76618index 0197747..7adb0dc 100644
fe2de317
MT
76619--- a/net/ipv4/route.c
76620+++ b/net/ipv4/route.c
5e856224 76621@@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
8308f9c9
MT
76622
76623 static inline int rt_genid(struct net *net)
76624 {
76625- return atomic_read(&net->ipv4.rt_genid);
76626+ return atomic_read_unchecked(&net->ipv4.rt_genid);
76627 }
76628
76629 #ifdef CONFIG_PROC_FS
5e856224 76630@@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
8308f9c9
MT
76631 unsigned char shuffle;
76632
76633 get_random_bytes(&shuffle, sizeof(shuffle));
76634- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76635+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
5e856224 76636 inetpeer_invalidate_tree(AF_INET);
8308f9c9
MT
76637 }
76638
5e856224 76639@@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
15a11c5b
MT
76640 error = rt->dst.error;
76641 if (peer) {
6892158b 76642 inet_peer_refcheck(rt->peer);
15a11c5b
MT
76643- id = atomic_read(&peer->ip_id_count) & 0xffff;
76644+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
76645 if (peer->tcp_ts_stamp) {
76646 ts = peer->tcp_ts;
76647 tsage = get_seconds() - peer->tcp_ts_stamp;
fe2de317 76648diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5e856224 76649index fd54c5f..96d6407 100644
fe2de317
MT
76650--- a/net/ipv4/tcp_ipv4.c
76651+++ b/net/ipv4/tcp_ipv4.c
5e856224 76652@@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
ae4e228f 76653 int sysctl_tcp_low_latency __read_mostly;
6892158b 76654 EXPORT_SYMBOL(sysctl_tcp_low_latency);
58c5fc13 76655
58c5fc13 76656+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 76657+extern int grsec_enable_blackhole;
58c5fc13 76658+#endif
ae4e228f
MT
76659
76660 #ifdef CONFIG_TCP_MD5SIG
76661 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
5e856224 76662@@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
57199397
MT
76663 return 0;
76664
76665 reset:
76666+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76667+ if (!grsec_enable_blackhole)
76668+#endif
76669 tcp_v4_send_reset(rsk, skb);
76670 discard:
76671 kfree_skb(skb);
5e856224 76672@@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
ae4e228f
MT
76673 TCP_SKB_CB(skb)->sacked = 0;
76674
76675 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76676- if (!sk)
76677+ if (!sk) {
76678+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76679+ ret = 1;
76680+#endif
76681 goto no_tcp_socket;
df50ba0c 76682-
ae4e228f 76683+ }
ae4e228f
MT
76684 process:
76685- if (sk->sk_state == TCP_TIME_WAIT)
76686+ if (sk->sk_state == TCP_TIME_WAIT) {
76687+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76688+ ret = 2;
76689+#endif
76690 goto do_time_wait;
76691+ }
76692
df50ba0c
MT
76693 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
76694 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
5e856224 76695@@ -1755,6 +1768,10 @@ no_tcp_socket:
58c5fc13
MT
76696 bad_packet:
76697 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76698 } else {
76699+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f
MT
76700+ if (!grsec_enable_blackhole || (ret == 1 &&
76701+ (skb->dev->flags & IFF_LOOPBACK)))
58c5fc13
MT
76702+#endif
76703 tcp_v4_send_reset(NULL, skb);
76704 }
76705
5e856224 76706@@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
57199397
MT
76707 0, /* non standard timer */
76708 0, /* open_requests have no inode */
76709 atomic_read(&sk->sk_refcnt),
76710+#ifdef CONFIG_GRKERNSEC_HIDESYM
76711+ NULL,
76712+#else
76713 req,
76714+#endif
76715 len);
76716 }
76717
5e856224 76718@@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
57199397
MT
76719 sock_i_uid(sk),
76720 icsk->icsk_probes_out,
76721 sock_i_ino(sk),
76722- atomic_read(&sk->sk_refcnt), sk,
76723+ atomic_read(&sk->sk_refcnt),
76724+#ifdef CONFIG_GRKERNSEC_HIDESYM
76725+ NULL,
76726+#else
76727+ sk,
76728+#endif
76729 jiffies_to_clock_t(icsk->icsk_rto),
76730 jiffies_to_clock_t(icsk->icsk_ack.ato),
76731 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
5e856224 76732@@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
15a11c5b 76733 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
57199397
MT
76734 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76735 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76736- atomic_read(&tw->tw_refcnt), tw, len);
76737+ atomic_read(&tw->tw_refcnt),
76738+#ifdef CONFIG_GRKERNSEC_HIDESYM
76739+ NULL,
76740+#else
76741+ tw,
76742+#endif
76743+ len);
76744 }
76745
76746 #define TMPSZ 150
fe2de317 76747diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
5e856224 76748index 550e755..25721b3 100644
fe2de317
MT
76749--- a/net/ipv4/tcp_minisocks.c
76750+++ b/net/ipv4/tcp_minisocks.c
df50ba0c 76751@@ -27,6 +27,10 @@
ae4e228f
MT
76752 #include <net/inet_common.h>
76753 #include <net/xfrm.h>
76754
76755+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76756+extern int grsec_enable_blackhole;
76757+#endif
76758+
76759 int sysctl_tcp_syncookies __read_mostly = 1;
76760 EXPORT_SYMBOL(sysctl_tcp_syncookies);
76761
5e856224 76762@@ -753,6 +757,10 @@ listen_overflow:
58c5fc13
MT
76763
76764 embryonic_reset:
76765 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
76766+
df50ba0c
MT
76767+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76768+ if (!grsec_enable_blackhole)
58c5fc13 76769+#endif
df50ba0c
MT
76770 if (!(flg & TCP_FLAG_RST))
76771 req->rsk_ops->send_reset(sk, skb);
58c5fc13 76772
fe2de317
MT
76773diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
76774index 85ee7eb..53277ab 100644
76775--- a/net/ipv4/tcp_probe.c
76776+++ b/net/ipv4/tcp_probe.c
76777@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
ae4e228f
MT
76778 if (cnt + width >= len)
76779 break;
76780
76781- if (copy_to_user(buf + cnt, tbuf, width))
bc901d79 76782+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
ae4e228f
MT
76783 return -EFAULT;
76784 cnt += width;
76785 }
fe2de317 76786diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
5e856224 76787index cd2e072..1fffee2 100644
fe2de317
MT
76788--- a/net/ipv4/tcp_timer.c
76789+++ b/net/ipv4/tcp_timer.c
df50ba0c
MT
76790@@ -22,6 +22,10 @@
76791 #include <linux/gfp.h>
ae4e228f
MT
76792 #include <net/tcp.h>
76793
76794+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76795+extern int grsec_lastack_retries;
76796+#endif
76797+
76798 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76799 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76800 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
5e856224 76801@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
ae4e228f
MT
76802 }
76803 }
76804
76805+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76806+ if ((sk->sk_state == TCP_LAST_ACK) &&
76807+ (grsec_lastack_retries > 0) &&
76808+ (grsec_lastack_retries < retry_until))
76809+ retry_until = grsec_lastack_retries;
76810+#endif
76811+
bc901d79
MT
76812 if (retransmits_timed_out(sk, retry_until,
76813 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
ae4e228f 76814 /* Has it gone just too far? */
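Across the TCP and UDP hunks in this section, CONFIG_GRKERNSEC_BLACKHOLE gates outgoing resets and port-unreachable replies behind the grsec_enable_blackhole toggle, and tcp_write_timeout() above additionally caps LAST_ACK retransmissions with the grsec_lastack_retries knob so a silent peer is not re-probed indefinitely. The clamp in isolation; effective_retries() is a hypothetical helper name, as the patch applies the same logic inline:

#include <net/tcp_states.h>		/* TCP_LAST_ACK */

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
extern int grsec_lastack_retries;	/* toggle defined elsewhere in this patch */
#endif

static int effective_retries(int sk_state, int retry_until)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	if (sk_state == TCP_LAST_ACK &&
	    grsec_lastack_retries > 0 &&
	    grsec_lastack_retries < retry_until)
		retry_until = grsec_lastack_retries;
#endif
	return retry_until;
}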
fe2de317 76815diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5e856224 76816index 5d075b5..d907d5f 100644
fe2de317
MT
76817--- a/net/ipv4/udp.c
76818+++ b/net/ipv4/udp.c
58c5fc13
MT
76819@@ -86,6 +86,7 @@
76820 #include <linux/types.h>
76821 #include <linux/fcntl.h>
76822 #include <linux/module.h>
76823+#include <linux/security.h>
76824 #include <linux/socket.h>
76825 #include <linux/sockios.h>
76826 #include <linux/igmp.h>
6e9df6a3
MT
76827@@ -108,6 +109,10 @@
76828 #include <trace/events/udp.h>
ae4e228f
MT
76829 #include "udp_impl.h"
76830
76831+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76832+extern int grsec_enable_blackhole;
76833+#endif
76834+
76835 struct udp_table udp_table __read_mostly;
76836 EXPORT_SYMBOL(udp_table);
76837
5e856224 76838@@ -566,6 +571,9 @@ found:
58c5fc13
MT
76839 return s;
76840 }
76841
76842+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76843+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76844+
76845 /*
76846 * This routine is called by the ICMP module when it gets some
76847 * sort of error condition. If err < 0 then the socket should
5e856224 76848@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
58c5fc13
MT
76849 dport = usin->sin_port;
76850 if (dport == 0)
76851 return -EINVAL;
76852+
76853+ err = gr_search_udp_sendmsg(sk, usin);
76854+ if (err)
76855+ return err;
76856 } else {
76857 if (sk->sk_state != TCP_ESTABLISHED)
76858 return -EDESTADDRREQ;
76859+
76860+ err = gr_search_udp_sendmsg(sk, NULL);
76861+ if (err)
76862+ return err;
76863+
ae4e228f
MT
76864 daddr = inet->inet_daddr;
76865 dport = inet->inet_dport;
58c5fc13 76866 /* Open fast path for connected socket.
5e856224 76867@@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
8308f9c9
MT
76868 udp_lib_checksum_complete(skb)) {
76869 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76870 IS_UDPLITE(sk));
76871- atomic_inc(&sk->sk_drops);
76872+ atomic_inc_unchecked(&sk->sk_drops);
76873 __skb_unlink(skb, rcvq);
76874 __skb_queue_tail(&list_kill, skb);
76875 }
5e856224 76876@@ -1186,6 +1203,10 @@ try_again:
58c5fc13
MT
76877 if (!skb)
76878 goto out;
76879
76880+ err = gr_search_udp_recvmsg(sk, skb);
76881+ if (err)
76882+ goto out_free;
76883+
76884 ulen = skb->len - sizeof(struct udphdr);
4c928ab7
MT
76885 copied = len;
76886 if (copied > ulen)
5e856224 76887@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
76888
76889 drop:
76890 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76891- atomic_inc(&sk->sk_drops);
76892+ atomic_inc_unchecked(&sk->sk_drops);
76893 kfree_skb(skb);
76894 return -1;
76895 }
5e856224 76896@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
8308f9c9
MT
76897 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
76898
76899 if (!skb1) {
76900- atomic_inc(&sk->sk_drops);
76901+ atomic_inc_unchecked(&sk->sk_drops);
76902 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76903 IS_UDPLITE(sk));
76904 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
5e856224 76905@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
58c5fc13
MT
76906 goto csum_error;
76907
76908 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76909+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 76910+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
58c5fc13
MT
76911+#endif
76912 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76913
76914 /*
5e856224 76915@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
57199397
MT
76916 sk_wmem_alloc_get(sp),
76917 sk_rmem_alloc_get(sp),
76918 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76919- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 76920- atomic_read(&sp->sk_drops), len);
57199397
MT
76921+ atomic_read(&sp->sk_refcnt),
76922+#ifdef CONFIG_GRKERNSEC_HIDESYM
76923+ NULL,
76924+#else
76925+ sp,
76926+#endif
8308f9c9 76927+ atomic_read_unchecked(&sp->sk_drops), len);
57199397
MT
76928 }
76929
8308f9c9 76930 int udp4_seq_show(struct seq_file *seq, void *v)
fe2de317 76931diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5e856224 76932index 6b8ebc5..1d624f4 100644
fe2de317
MT
76933--- a/net/ipv6/addrconf.c
76934+++ b/net/ipv6/addrconf.c
5e856224 76935@@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
6e9df6a3
MT
76936 p.iph.ihl = 5;
76937 p.iph.protocol = IPPROTO_IPV6;
76938 p.iph.ttl = 64;
76939- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76940+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76941
76942 if (ops->ndo_do_ioctl) {
76943 mm_segment_t oldfs = get_fs();
4c928ab7 76944diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
5e856224 76945index 2ae79db..8f101bf 100644
4c928ab7
MT
76946--- a/net/ipv6/ah6.c
76947+++ b/net/ipv6/ah6.c
76948@@ -56,6 +56,8 @@ struct ah_skb_cb {
76949 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76950
76951 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76952+ unsigned int size) __size_overflow(3);
76953+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76954 unsigned int size)
76955 {
76956 unsigned int len;
fe2de317 76957diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
5e856224 76958index 02dd203..e03fcc9 100644
fe2de317
MT
76959--- a/net/ipv6/inet6_connection_sock.c
76960+++ b/net/ipv6/inet6_connection_sock.c
76961@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
8308f9c9
MT
76962 #ifdef CONFIG_XFRM
76963 {
76964 struct rt6_info *rt = (struct rt6_info *)dst;
76965- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76966+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76967 }
76968 #endif
76969 }
fe2de317 76970@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
8308f9c9
MT
76971 #ifdef CONFIG_XFRM
76972 if (dst) {
76973 struct rt6_info *rt = (struct rt6_info *)dst;
76974- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76975+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76976 __sk_dst_reset(sk);
76977 dst = NULL;
76978 }
fe2de317 76979diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
5e856224 76980index 18a2719..779f36a 100644
fe2de317
MT
76981--- a/net/ipv6/ipv6_sockglue.c
76982+++ b/net/ipv6/ipv6_sockglue.c
4c928ab7 76983@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
6e9df6a3
MT
76984 if (sk->sk_type != SOCK_STREAM)
76985 return -ENOPROTOOPT;
76986
76987- msg.msg_control = optval;
76988+ msg.msg_control = (void __force_kernel *)optval;
76989 msg.msg_controllen = len;
76990 msg.msg_flags = flags;
76991
4c928ab7 76992diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
5e856224 76993index 94874b0..108a94d 100644
4c928ab7
MT
76994--- a/net/ipv6/netfilter/ip6_tables.c
76995+++ b/net/ipv6/netfilter/ip6_tables.c
5e856224
MT
76996@@ -945,6 +945,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76997 static int
76998 copy_entries_to_user(unsigned int total_size,
76999 const struct xt_table *table,
77000+ void __user *userptr) __size_overflow(1);
77001+static int
77002+copy_entries_to_user(unsigned int total_size,
77003+ const struct xt_table *table,
77004 void __user *userptr)
77005 {
77006 unsigned int off, num;
77007@@ -1194,6 +1198,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
4c928ab7
MT
77008 static int
77009 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
77010 struct xt_table_info *newinfo, unsigned int num_counters,
77011+ void __user *counters_ptr) __size_overflow(5);
77012+static int
77013+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
77014+ struct xt_table_info *newinfo, unsigned int num_counters,
77015 void __user *counters_ptr)
77016 {
77017 int ret;
5e856224 77018@@ -1315,6 +1323,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
4c928ab7
MT
77019
77020 static int
77021 do_add_counters(struct net *net, const void __user *user, unsigned int len,
77022+ int compat) __size_overflow(3);
77023+static int
77024+do_add_counters(struct net *net, const void __user *user, unsigned int len,
77025 int compat)
77026 {
77027 unsigned int i, curcpu;
fe2de317 77028diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
5e856224 77029index d02f7e4..2d2a0f1 100644
fe2de317
MT
77030--- a/net/ipv6/raw.c
77031+++ b/net/ipv6/raw.c
4c928ab7 77032@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9 77033 {
4c928ab7 77034 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
8308f9c9
MT
77035 skb_checksum_complete(skb)) {
77036- atomic_inc(&sk->sk_drops);
77037+ atomic_inc_unchecked(&sk->sk_drops);
77038 kfree_skb(skb);
77039 return NET_RX_DROP;
77040 }
5e856224 77041@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
77042 struct raw6_sock *rp = raw6_sk(sk);
77043
77044 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
77045- atomic_inc(&sk->sk_drops);
77046+ atomic_inc_unchecked(&sk->sk_drops);
77047 kfree_skb(skb);
77048 return NET_RX_DROP;
77049 }
5e856224 77050@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
77051
77052 if (inet->hdrincl) {
77053 if (skb_checksum_complete(skb)) {
77054- atomic_inc(&sk->sk_drops);
77055+ atomic_inc_unchecked(&sk->sk_drops);
77056 kfree_skb(skb);
77057 return NET_RX_DROP;
77058 }
5e856224 77059@@ -602,7 +602,7 @@ out:
58c5fc13
MT
77060 return err;
77061 }
77062
77063-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
77064+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66a7e928 77065 struct flowi6 *fl6, struct dst_entry **dstp,
58c5fc13
MT
77066 unsigned int flags)
77067 {
5e856224 77068@@ -912,12 +912,15 @@ do_confirm:
71d190be
MT
77069 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
77070 char __user *optval, int optlen)
77071 {
77072+ struct icmp6_filter filter;
77073+
77074 switch (optname) {
77075 case ICMPV6_FILTER:
77076 if (optlen > sizeof(struct icmp6_filter))
77077 optlen = sizeof(struct icmp6_filter);
77078- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
77079+ if (copy_from_user(&filter, optval, optlen))
77080 return -EFAULT;
15a11c5b 77081+ raw6_sk(sk)->filter = filter;
71d190be
MT
77082 return 0;
77083 default:
77084 return -ENOPROTOOPT;
5e856224 77085@@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
71d190be
MT
77086 char __user *optval, int __user *optlen)
77087 {
71d190be 77088 int len;
15a11c5b 77089+ struct icmp6_filter filter;
71d190be
MT
77090
77091 switch (optname) {
15a11c5b 77092 case ICMPV6_FILTER:
5e856224 77093@@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
71d190be
MT
77094 len = sizeof(struct icmp6_filter);
77095 if (put_user(len, optlen))
77096 return -EFAULT;
77097- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
15a11c5b
MT
77098+ filter = raw6_sk(sk)->filter;
77099+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
71d190be
MT
77100 return -EFAULT;
77101 return 0;
77102 default:
5e856224 77103@@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
6892158b
MT
77104 0, 0L, 0,
77105 sock_i_uid(sp), 0,
77106 sock_i_ino(sp),
77107- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77108+ atomic_read(&sp->sk_refcnt),
77109+#ifdef CONFIG_GRKERNSEC_HIDESYM
77110+ NULL,
77111+#else
77112+ sp,
77113+#endif
8308f9c9 77114+ atomic_read_unchecked(&sp->sk_drops));
6892158b
MT
77115 }
77116
77117 static int raw6_seq_show(struct seq_file *seq, void *v)
fe2de317 77118diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5e856224 77119index a89ca8d..12e66b0 100644
fe2de317
MT
77120--- a/net/ipv6/tcp_ipv6.c
77121+++ b/net/ipv6/tcp_ipv6.c
5e856224 77122@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
df50ba0c
MT
77123 }
77124 #endif
77125
77126+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77127+extern int grsec_enable_blackhole;
77128+#endif
77129+
77130 static void tcp_v6_hash(struct sock *sk)
77131 {
77132 if (sk->sk_state != TCP_CLOSE) {
5e856224 77133@@ -1654,6 +1658,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
57199397
MT
77134 return 0;
77135
77136 reset:
77137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77138+ if (!grsec_enable_blackhole)
77139+#endif
77140 tcp_v6_send_reset(sk, skb);
77141 discard:
77142 if (opt_skb)
5e856224 77143@@ -1733,12 +1740,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
df50ba0c 77144 TCP_SKB_CB(skb)->sacked = 0;
58c5fc13 77145
df50ba0c
MT
77146 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77147- if (!sk)
77148+ if (!sk) {
77149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77150+ ret = 1;
77151+#endif
77152 goto no_tcp_socket;
77153+ }
77154
77155 process:
77156- if (sk->sk_state == TCP_TIME_WAIT)
77157+ if (sk->sk_state == TCP_TIME_WAIT) {
58c5fc13 77158+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 77159+ ret = 2;
58c5fc13 77160+#endif
df50ba0c
MT
77161 goto do_time_wait;
77162+ }
77163
57199397
MT
77164 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
77165 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
5e856224 77166@@ -1786,6 +1801,10 @@ no_tcp_socket:
58c5fc13
MT
77167 bad_packet:
77168 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77169 } else {
77170+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c
MT
77171+ if (!grsec_enable_blackhole || (ret == 1 &&
77172+ (skb->dev->flags & IFF_LOOPBACK)))
58c5fc13
MT
77173+#endif
77174 tcp_v6_send_reset(NULL, skb);
77175 }
77176
5e856224 77177@@ -2047,7 +2066,13 @@ static void get_openreq6(struct seq_file *seq,
6892158b
MT
77178 uid,
77179 0, /* non standard timer */
77180 0, /* open_requests have no inode */
77181- 0, req);
77182+ 0,
77183+#ifdef CONFIG_GRKERNSEC_HIDESYM
77184+ NULL
77185+#else
77186+ req
77187+#endif
77188+ );
77189 }
77190
77191 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
5e856224 77192@@ -2097,7 +2122,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
6892158b
MT
77193 sock_i_uid(sp),
77194 icsk->icsk_probes_out,
77195 sock_i_ino(sp),
77196- atomic_read(&sp->sk_refcnt), sp,
77197+ atomic_read(&sp->sk_refcnt),
77198+#ifdef CONFIG_GRKERNSEC_HIDESYM
77199+ NULL,
77200+#else
77201+ sp,
77202+#endif
77203 jiffies_to_clock_t(icsk->icsk_rto),
77204 jiffies_to_clock_t(icsk->icsk_ack.ato),
77205 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
5e856224 77206@@ -2132,7 +2162,13 @@ static void get_timewait6_sock(struct seq_file *seq,
6892158b
MT
77207 dest->s6_addr32[2], dest->s6_addr32[3], destp,
77208 tw->tw_substate, 0, 0,
77209 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77210- atomic_read(&tw->tw_refcnt), tw);
77211+ atomic_read(&tw->tw_refcnt),
77212+#ifdef CONFIG_GRKERNSEC_HIDESYM
77213+ NULL
77214+#else
77215+ tw
77216+#endif
77217+ );
77218 }
77219
77220 static int tcp6_seq_show(struct seq_file *seq, void *v)
fe2de317 77221diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5e856224 77222index 4f96b5c..75543d7 100644
fe2de317
MT
77223--- a/net/ipv6/udp.c
77224+++ b/net/ipv6/udp.c
df50ba0c
MT
77225@@ -50,6 +50,10 @@
77226 #include <linux/seq_file.h>
77227 #include "udp_impl.h"
77228
77229+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77230+extern int grsec_enable_blackhole;
77231+#endif
77232+
77233 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
77234 {
77235 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
5e856224 77236@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
8308f9c9
MT
77237
77238 return 0;
77239 drop:
77240- atomic_inc(&sk->sk_drops);
77241+ atomic_inc_unchecked(&sk->sk_drops);
77242 drop_no_sk_drops_inc:
77243 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77244 kfree_skb(skb);
5e856224 77245@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
8308f9c9
MT
77246 continue;
77247 }
77248 drop:
77249- atomic_inc(&sk->sk_drops);
77250+ atomic_inc_unchecked(&sk->sk_drops);
77251 UDP6_INC_STATS_BH(sock_net(sk),
77252 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
77253 UDP6_INC_STATS_BH(sock_net(sk),
5e856224 77254@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
58c5fc13
MT
77255 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
77256 proto == IPPROTO_UDPLITE);
77257
77258+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 77259+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
58c5fc13 77260+#endif
df50ba0c 77261 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
58c5fc13
MT
77262
77263 kfree_skb(skb);
5e856224 77264@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
8308f9c9
MT
77265 if (!sock_owned_by_user(sk))
77266 udpv6_queue_rcv_skb(sk, skb);
77267 else if (sk_add_backlog(sk, skb)) {
77268- atomic_inc(&sk->sk_drops);
77269+ atomic_inc_unchecked(&sk->sk_drops);
77270 bh_unlock_sock(sk);
77271 sock_put(sk);
77272 goto discard;
5e856224 77273@@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
6892158b
MT
77274 0, 0L, 0,
77275 sock_i_uid(sp), 0,
77276 sock_i_ino(sp),
77277- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 77278- atomic_read(&sp->sk_drops));
6892158b
MT
77279+ atomic_read(&sp->sk_refcnt),
77280+#ifdef CONFIG_GRKERNSEC_HIDESYM
77281+ NULL,
77282+#else
77283+ sp,
77284+#endif
8308f9c9 77285+ atomic_read_unchecked(&sp->sk_drops));
6892158b
MT
77286 }
77287
8308f9c9 77288 int udp6_seq_show(struct seq_file *seq, void *v)
fe2de317 77289diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
4c928ab7 77290index 253695d..9481ce8 100644
fe2de317
MT
77291--- a/net/irda/ircomm/ircomm_tty.c
77292+++ b/net/irda/ircomm/ircomm_tty.c
77293@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
77294 add_wait_queue(&self->open_wait, &wait);
77295
77296 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
77297- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 77298+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
58c5fc13
MT
77299
77300 /* As far as I can see, we protect open_count - Jean II */
77301 spin_lock_irqsave(&self->spinlock, flags);
77302 if (!tty_hung_up_p(filp)) {
77303 extra_count = 1;
77304- self->open_count--;
c52201e0 77305+ local_dec(&self->open_count);
58c5fc13
MT
77306 }
77307 spin_unlock_irqrestore(&self->spinlock, flags);
77308- self->blocked_open++;
c52201e0 77309+ local_inc(&self->blocked_open);
58c5fc13
MT
77310
77311 while (1) {
77312 if (tty->termios->c_cflag & CBAUD) {
fe2de317 77313@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
77314 }
77315
77316 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
77317- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 77318+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
58c5fc13
MT
77319
77320 schedule();
77321 }
fe2de317 77322@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
77323 if (extra_count) {
77324 /* ++ is not atomic, so this should be protected - Jean II */
77325 spin_lock_irqsave(&self->spinlock, flags);
77326- self->open_count++;
c52201e0 77327+ local_inc(&self->open_count);
58c5fc13
MT
77328 spin_unlock_irqrestore(&self->spinlock, flags);
77329 }
77330- self->blocked_open--;
c52201e0 77331+ local_dec(&self->blocked_open);
58c5fc13
MT
77332
77333 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
77334- __FILE__,__LINE__, tty->driver->name, self->open_count);
c52201e0 77335+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
58c5fc13
MT
77336
77337 if (!retval)
77338 self->flags |= ASYNC_NORMAL_ACTIVE;
fe2de317 77339@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
77340 }
77341 /* ++ is not atomic, so this should be protected - Jean II */
77342 spin_lock_irqsave(&self->spinlock, flags);
77343- self->open_count++;
c52201e0 77344+ local_inc(&self->open_count);
58c5fc13
MT
77345
77346 tty->driver_data = self;
77347 self->tty = tty;
77348 spin_unlock_irqrestore(&self->spinlock, flags);
77349
77350 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
77351- self->line, self->open_count);
c52201e0 77352+ self->line, local_read(&self->open_count));
58c5fc13
MT
77353
77354 /* Not really used by us, but lets do it anyway */
77355 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
fe2de317 77356@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
77357 return;
77358 }
77359
77360- if ((tty->count == 1) && (self->open_count != 1)) {
c52201e0 77361+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
58c5fc13
MT
77362 /*
77363 * Uh, oh. tty->count is 1, which means that the tty
77364 * structure will be freed. state->count should always
fe2de317 77365@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
77366 */
77367 IRDA_DEBUG(0, "%s(), bad serial port count; "
77368 "tty->count is 1, state->count is %d\n", __func__ ,
77369- self->open_count);
77370- self->open_count = 1;
c52201e0
MT
77371+ local_read(&self->open_count));
77372+ local_set(&self->open_count, 1);
58c5fc13
MT
77373 }
77374
77375- if (--self->open_count < 0) {
c52201e0 77376+ if (local_dec_return(&self->open_count) < 0) {
58c5fc13
MT
77377 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77378- __func__, self->line, self->open_count);
77379- self->open_count = 0;
c52201e0
MT
77380+ __func__, self->line, local_read(&self->open_count));
77381+ local_set(&self->open_count, 0);
58c5fc13
MT
77382 }
77383- if (self->open_count) {
c52201e0 77384+ if (local_read(&self->open_count)) {
58c5fc13
MT
77385 spin_unlock_irqrestore(&self->spinlock, flags);
77386
77387 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
fe2de317 77388@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
77389 tty->closing = 0;
77390 self->tty = NULL;
77391
77392- if (self->blocked_open) {
c52201e0 77393+ if (local_read(&self->blocked_open)) {
58c5fc13
MT
77394 if (self->close_delay)
77395 schedule_timeout_interruptible(self->close_delay);
77396 wake_up_interruptible(&self->open_wait);
fe2de317 77397@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
58c5fc13
MT
77398 spin_lock_irqsave(&self->spinlock, flags);
77399 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77400 self->tty = NULL;
77401- self->open_count = 0;
c52201e0 77402+ local_set(&self->open_count, 0);
58c5fc13
MT
77403 spin_unlock_irqrestore(&self->spinlock, flags);
77404
77405 wake_up_interruptible(&self->open_wait);
fe2de317 77406@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
58c5fc13
MT
77407 seq_putc(m, '\n');
77408
77409 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77410- seq_printf(m, "Open count: %d\n", self->open_count);
c52201e0 77411+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
58c5fc13
MT
77412 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77413 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77414
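The ircomm_tty hunks above retype the driver's open_count and blocked_open fields and route every update through the <asm/local.h> accessors (local_inc, local_dec, local_dec_return, local_read, local_set) instead of plain ++/--; the mac80211 hunks further below apply the same conversion to local->open_count. A small sketch of those accessors on a hypothetical counter, with on_open()/on_close()/readers() as illustrative names:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void on_open(void)
{
	local_inc(&open_count);
}

static void on_close(void)
{
	if (local_dec_return(&open_count) < 0)	/* same underflow clamp as the driver */
		local_set(&open_count, 0);
}

static long readers(void)
{
	return local_read(&open_count);
}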
fe2de317 77415diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
5e856224 77416index d5c5b8f..33beff0 100644
fe2de317
MT
77417--- a/net/iucv/af_iucv.c
77418+++ b/net/iucv/af_iucv.c
5e856224 77419@@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
8308f9c9
MT
77420
77421 write_lock_bh(&iucv_sk_list.lock);
77422
77423- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77424+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77425 while (__iucv_get_sock_by_name(name)) {
77426 sprintf(name, "%08x",
77427- atomic_inc_return(&iucv_sk_list.autobind_name));
77428+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77429 }
77430
77431 write_unlock_bh(&iucv_sk_list.lock);
fe2de317 77432diff --git a/net/key/af_key.c b/net/key/af_key.c
5e856224 77433index 11dbb22..c20f667 100644
fe2de317
MT
77434--- a/net/key/af_key.c
77435+++ b/net/key/af_key.c
4c928ab7 77436@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
8308f9c9
MT
77437 static u32 get_acqseq(void)
77438 {
77439 u32 res;
77440- static atomic_t acqseq;
77441+ static atomic_unchecked_t acqseq;
77442
77443 do {
77444- res = atomic_inc_return(&acqseq);
77445+ res = atomic_inc_return_unchecked(&acqseq);
77446 } while (!res);
77447 return res;
77448 }
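get_acqseq() above is one of many counters in this section (sk_drops, rt_genid, ip_id_count, in_pkts, autobind_name, global_seq) switched to the *_unchecked atomic type and helpers introduced earlier in this patch, marking their wrap-around as intentional so the overflow checks applied to ordinary atomic_t reference counts do not fire on them. The substitution distilled; next_acqseq() is a hypothetical wrapper around the same loop:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_unchecked_t acqseq;	/* was: static atomic_t acqseq; */

static u32 next_acqseq(void)
{
	u32 res;

	do {
		res = atomic_inc_return_unchecked(&acqseq);	/* wrap-around is fine here */
	} while (!res);
	return res;
}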
fe2de317 77449diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5e856224 77450index 2f0642d..e5c6fba 100644
fe2de317
MT
77451--- a/net/mac80211/ieee80211_i.h
77452+++ b/net/mac80211/ieee80211_i.h
5e856224 77453@@ -28,6 +28,7 @@
c52201e0
MT
77454 #include <net/ieee80211_radiotap.h>
77455 #include <net/cfg80211.h>
77456 #include <net/mac80211.h>
77457+#include <asm/local.h>
77458 #include "key.h"
77459 #include "sta_info.h"
77460
5e856224 77461@@ -781,7 +782,7 @@ struct ieee80211_local {
ae4e228f 77462 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
58c5fc13
MT
77463 spinlock_t queue_stop_reason_lock;
77464
58c5fc13 77465- int open_count;
c52201e0 77466+ local_t open_count;
58c5fc13
MT
77467 int monitors, cooked_mntrs;
77468 /* number of interfaces with corresponding FIF_ flags */
bc901d79 77469 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
fe2de317 77470diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
5e856224 77471index 8e2137b..2974283 100644
fe2de317
MT
77472--- a/net/mac80211/iface.c
77473+++ b/net/mac80211/iface.c
5e856224 77474@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
58c5fc13
MT
77475 break;
77476 }
77477
77478- if (local->open_count == 0) {
c52201e0 77479+ if (local_read(&local->open_count) == 0) {
58c5fc13
MT
77480 res = drv_start(local);
77481 if (res)
77482 goto err_del_bss;
5e856224 77483@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
bc901d79
MT
77484 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
77485
77486 if (!is_valid_ether_addr(dev->dev_addr)) {
77487- if (!local->open_count)
c52201e0 77488+ if (!local_read(&local->open_count))
bc901d79
MT
77489 drv_stop(local);
77490 return -EADDRNOTAVAIL;
77491 }
5e856224 77492@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
bc901d79 77493 mutex_unlock(&local->mtx);
58c5fc13 77494
bc901d79
MT
77495 if (coming_up)
77496- local->open_count++;
c52201e0 77497+ local_inc(&local->open_count);
58c5fc13 77498
5e856224 77499 if (hw_reconf_flags)
58c5fc13 77500 ieee80211_hw_config(local, hw_reconf_flags);
5e856224 77501@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
58c5fc13 77502 err_del_interface:
5e856224 77503 drv_remove_interface(local, sdata);
58c5fc13
MT
77504 err_stop:
77505- if (!local->open_count)
c52201e0 77506+ if (!local_read(&local->open_count))
58c5fc13
MT
77507 drv_stop(local);
77508 err_del_bss:
77509 sdata->bss = NULL;
5e856224 77510@@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
58c5fc13
MT
77511 }
77512
bc901d79
MT
77513 if (going_down)
77514- local->open_count--;
c52201e0 77515+ local_dec(&local->open_count);
58c5fc13
MT
77516
77517 switch (sdata->vif.type) {
77518 case NL80211_IFTYPE_AP_VLAN:
5e856224 77519@@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
58c5fc13
MT
77520
77521 ieee80211_recalc_ps(local, -1);
77522
77523- if (local->open_count == 0) {
c52201e0 77524+ if (local_read(&local->open_count) == 0) {
bc901d79
MT
77525 if (local->ops->napi_poll)
77526 napi_disable(&local->napi);
ae4e228f 77527 ieee80211_clear_tx_pending(local);
fe2de317 77528diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5e856224 77529index b142bd4..a651749 100644
fe2de317
MT
77530--- a/net/mac80211/main.c
77531+++ b/net/mac80211/main.c
5e856224 77532@@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
58c5fc13
MT
77533 local->hw.conf.power_level = power;
77534 }
77535
77536- if (changed && local->open_count) {
c52201e0 77537+ if (changed && local_read(&local->open_count)) {
58c5fc13
MT
77538 ret = drv_config(local, changed);
77539 /*
77540 * Goal:
fe2de317 77541diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
5e856224 77542index 596efaf..8f1911f 100644
fe2de317
MT
77543--- a/net/mac80211/pm.c
77544+++ b/net/mac80211/pm.c
77545@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
6e9df6a3
MT
77546 struct ieee80211_sub_if_data *sdata;
77547 struct sta_info *sta;
77548
77549- if (!local->open_count)
77550+ if (!local_read(&local->open_count))
77551 goto suspend;
77552
77553 ieee80211_scan_cancel(local);
fe2de317 77554@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
15a11c5b
MT
77555 cancel_work_sync(&local->dynamic_ps_enable_work);
77556 del_timer_sync(&local->dynamic_ps_timer);
77557
77558- local->wowlan = wowlan && local->open_count;
77559+ local->wowlan = wowlan && local_read(&local->open_count);
77560 if (local->wowlan) {
77561 int err = drv_suspend(local, wowlan);
6e9df6a3 77562 if (err < 0) {
fe2de317 77563@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
58c5fc13
MT
77564 }
77565
77566 /* stop hardware - this must stop RX */
ae4e228f 77567- if (local->open_count)
c52201e0 77568+ if (local_read(&local->open_count))
ae4e228f
MT
77569 ieee80211_stop_device(local);
77570
15a11c5b 77571 suspend:
fe2de317 77572diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
5e856224 77573index f9b8e81..bb89b46 100644
fe2de317
MT
77574--- a/net/mac80211/rate.c
77575+++ b/net/mac80211/rate.c
4c928ab7 77576@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
58c5fc13
MT
77577
77578 ASSERT_RTNL();
ae4e228f
MT
77579
77580- if (local->open_count)
c52201e0 77581+ if (local_read(&local->open_count))
58c5fc13
MT
77582 return -EBUSY;
77583
ae4e228f 77584 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
fe2de317 77585diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
4c928ab7 77586index c97a065..ff61928 100644
fe2de317
MT
77587--- a/net/mac80211/rc80211_pid_debugfs.c
77588+++ b/net/mac80211/rc80211_pid_debugfs.c
4c928ab7 77589@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
58c5fc13 77590
ae4e228f 77591 spin_unlock_irqrestore(&events->lock, status);
58c5fc13 77592
ae4e228f
MT
77593- if (copy_to_user(buf, pb, p))
77594+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
77595 return -EFAULT;
58c5fc13 77596
ae4e228f 77597 return p;
fe2de317 77598diff --git a/net/mac80211/util.c b/net/mac80211/util.c
5e856224 77599index 9919892..8c49803 100644
fe2de317
MT
77600--- a/net/mac80211/util.c
77601+++ b/net/mac80211/util.c
5e856224
MT
77602@@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
77603 }
77604 #endif
6e9df6a3
MT
77605 /* everything else happens only if HW was up & running */
77606- if (!local->open_count)
77607+ if (!local_read(&local->open_count))
77608 goto wake_up;
77609
77610 /*
fe2de317 77611diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
5e856224 77612index f8ac4ef..b02560b 100644
fe2de317
MT
77613--- a/net/netfilter/Kconfig
77614+++ b/net/netfilter/Kconfig
5e856224 77615@@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
fe2de317
MT
77616
77617 To compile it as a module, choose M here. If unsure, say N.
77618
77619+config NETFILTER_XT_MATCH_GRADM
77620+ tristate '"gradm" match support'
77621+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77622+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77623+ ---help---
77624+ The gradm match allows matching on whether grsecurity RBAC is enabled.
77625+ It is useful when iptables rules are applied early on bootup to
77626+ prevent connections to the machine (except from a trusted host)
77627+ while the RBAC system is disabled.
77628+
77629 config NETFILTER_XT_MATCH_HASHLIMIT
77630 tristate '"hashlimit" match support'
77631 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
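The new Kconfig entry above describes the gradm match: an iptables match that tests whether the grsecurity RBAC system is enabled, so early-boot rules can fence the machine off until the policy is loaded. The module itself, net/netfilter/xt_gradm.c, is added further down in this patch; for orientation only, a generic sketch of the xtables match API of this kernel generation that such a module registers against, using example_* names that are not the gradm code:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static bool example_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	/* return true when the packet satisfies the rule's criterion */
	return false;
}

static struct xt_match example_mt_reg __read_mostly = {
	.name     = "example",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = example_mt,
	.me       = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");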
77632diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
5e856224 77633index 40f4c3d..0d5dd6b 100644
fe2de317
MT
77634--- a/net/netfilter/Makefile
77635+++ b/net/netfilter/Makefile
5e856224 77636@@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
fe2de317 77637 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
5e856224 77638 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
fe2de317
MT
77639 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77640+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77641 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77642 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77643 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77644diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
4c928ab7 77645index 29fa5ba..8debc79 100644
fe2de317
MT
77646--- a/net/netfilter/ipvs/ip_vs_conn.c
77647+++ b/net/netfilter/ipvs/ip_vs_conn.c
77648@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
8308f9c9
MT
77649 /* Increase the refcnt counter of the dest */
77650 atomic_inc(&dest->refcnt);
77651
77652- conn_flags = atomic_read(&dest->conn_flags);
77653+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
77654 if (cp->protocol != IPPROTO_UDP)
77655 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
77656 /* Bind with the destination and its corresponding transmitter */
fe2de317 77657@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
8308f9c9
MT
77658 atomic_set(&cp->refcnt, 1);
77659
77660 atomic_set(&cp->n_control, 0);
77661- atomic_set(&cp->in_pkts, 0);
77662+ atomic_set_unchecked(&cp->in_pkts, 0);
77663
66a7e928 77664 atomic_inc(&ipvs->conn_count);
8308f9c9 77665 if (flags & IP_VS_CONN_F_NO_CPORT)
fe2de317 77666@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
8308f9c9
MT
77667
77668 /* Don't drop the entry if its number of incoming packets is not
77669 located in [0, 8] */
77670- i = atomic_read(&cp->in_pkts);
77671+ i = atomic_read_unchecked(&cp->in_pkts);
77672 if (i > 8 || i < 0) return 0;
77673
77674 if (!todrop_rate[i]) return 0;
fe2de317 77675diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
5e856224 77676index 2555816..31492d9 100644
fe2de317
MT
77677--- a/net/netfilter/ipvs/ip_vs_core.c
77678+++ b/net/netfilter/ipvs/ip_vs_core.c
4c928ab7 77679@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
66a7e928 77680 ret = cp->packet_xmit(skb, cp, pd->pp);
8308f9c9
MT
77681 /* do not touch skb anymore */
77682
77683- atomic_inc(&cp->in_pkts);
77684+ atomic_inc_unchecked(&cp->in_pkts);
77685 ip_vs_conn_put(cp);
77686 return ret;
77687 }
4c928ab7 77688@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
66a7e928
MT
77689 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
77690 pkts = sysctl_sync_threshold(ipvs);
77691 else
77692- pkts = atomic_add_return(1, &cp->in_pkts);
77693+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77694
77695 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
8308f9c9 77696 cp->protocol == IPPROTO_SCTP) {
fe2de317 77697diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
5e856224 77698index b3afe18..08ec940 100644
fe2de317
MT
77699--- a/net/netfilter/ipvs/ip_vs_ctl.c
77700+++ b/net/netfilter/ipvs/ip_vs_ctl.c
4c928ab7 77701@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
66a7e928
MT
77702 ip_vs_rs_hash(ipvs, dest);
77703 write_unlock_bh(&ipvs->rs_lock);
8308f9c9
MT
77704 }
77705- atomic_set(&dest->conn_flags, conn_flags);
77706+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
77707
77708 /* bind the service */
77709 if (!dest->svc) {
4c928ab7 77710@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
77711 " %-7s %-6d %-10d %-10d\n",
77712 &dest->addr.in6,
77713 ntohs(dest->port),
77714- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77715+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77716 atomic_read(&dest->weight),
77717 atomic_read(&dest->activeconns),
77718 atomic_read(&dest->inactconns));
4c928ab7 77719@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
77720 "%-7s %-6d %-10d %-10d\n",
77721 ntohl(dest->addr.ip),
77722 ntohs(dest->port),
77723- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77724+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77725 atomic_read(&dest->weight),
77726 atomic_read(&dest->activeconns),
77727 atomic_read(&dest->inactconns));
4c928ab7 77728@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
8308f9c9
MT
77729
77730 entry.addr = dest->addr.ip;
77731 entry.port = dest->port;
77732- entry.conn_flags = atomic_read(&dest->conn_flags);
77733+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77734 entry.weight = atomic_read(&dest->weight);
77735 entry.u_threshold = dest->u_threshold;
77736 entry.l_threshold = dest->l_threshold;
4c928ab7 77737@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
8308f9c9
MT
77738 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77739
77740 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77741- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77742+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77743 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77744 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77745 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
fe2de317 77746diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
5e856224 77747index 8a0d6d6..90ec197 100644
fe2de317
MT
77748--- a/net/netfilter/ipvs/ip_vs_sync.c
77749+++ b/net/netfilter/ipvs/ip_vs_sync.c
6e9df6a3 77750@@ -649,7 +649,7 @@ control:
66a7e928
MT
77751 * i.e only increment in_pkts for Templates.
77752 */
77753 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
77754- int pkts = atomic_add_return(1, &cp->in_pkts);
77755+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77756
77757 if (pkts % sysctl_sync_period(ipvs) != 1)
77758 return;
fe2de317 77759@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
66a7e928
MT
77760
77761 if (opt)
77762 memcpy(&cp->in_seq, opt, sizeof(*opt));
77763- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77764+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77765 cp->state = state;
77766 cp->old_state = cp->state;
77767 /*
fe2de317 77768diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
5e856224 77769index 7fd66de..e6fb361 100644
fe2de317
MT
77770--- a/net/netfilter/ipvs/ip_vs_xmit.c
77771+++ b/net/netfilter/ipvs/ip_vs_xmit.c
77772@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
8308f9c9
MT
77773 else
77774 rc = NF_ACCEPT;
77775 /* do not touch skb anymore */
77776- atomic_inc(&cp->in_pkts);
77777+ atomic_inc_unchecked(&cp->in_pkts);
77778 goto out;
77779 }
77780
fe2de317 77781@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
8308f9c9
MT
77782 else
77783 rc = NF_ACCEPT;
77784 /* do not touch skb anymore */
77785- atomic_inc(&cp->in_pkts);
77786+ atomic_inc_unchecked(&cp->in_pkts);
77787 goto out;
77788 }
77789
fe2de317 77790diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
4c928ab7 77791index 66b2c54..c7884e3 100644
fe2de317
MT
77792--- a/net/netfilter/nfnetlink_log.c
77793+++ b/net/netfilter/nfnetlink_log.c
8308f9c9
MT
77794@@ -70,7 +70,7 @@ struct nfulnl_instance {
77795 };
77796
77797 static DEFINE_SPINLOCK(instances_lock);
77798-static atomic_t global_seq;
77799+static atomic_unchecked_t global_seq;
77800
77801 #define INSTANCE_BUCKETS 16
77802 static struct hlist_head instance_table[INSTANCE_BUCKETS];
4c928ab7 77803@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
8308f9c9
MT
77804 /* global sequence number */
77805 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77806 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77807- htonl(atomic_inc_return(&global_seq)));
77808+ htonl(atomic_inc_return_unchecked(&global_seq)));
77809
77810 if (data_len) {
77811 struct nlattr *nla;
fe2de317
MT
77812diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
77813new file mode 100644
77814index 0000000..6905327
77815--- /dev/null
77816+++ b/net/netfilter/xt_gradm.c
6892158b
MT
77817@@ -0,0 +1,51 @@
77818+/*
77819+ * gradm match for netfilter
77820