]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.4.6-201207242237.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.6-201207242237.patch
CommitLineData
d474a772
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b4a898f..781c7ad 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122@@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126-linux
127+lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131@@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135-media
136 mconf
137+mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144+mkpiggy
145 mkprep
146 mkregtable
147 mktables
148@@ -188,6 +207,7 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152+parse-events*
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -197,6 +217,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -207,6 +228,7 @@ r300_reg_safe.h
165 r420_reg_safe.h
166 r600_reg_safe.h
167 recordmcount
168+regdb.c
169 relocs
170 rlim_names.h
171 rn50_reg_safe.h
172@@ -216,7 +238,9 @@ series
173 setup
174 setup.bin
175 setup.elf
176+size_overflow_hash.h
177 sImage
178+slabinfo
179 sm_tbl*
180 split-include
181 syscalltab.h
182@@ -227,6 +251,7 @@ tftpboot.img
183 timeconst.h
184 times.h*
185 trix_boot.h
186+user_constants.h
187 utsrelease.h*
188 vdso-syms.lds
189 vdso.lds
190@@ -238,13 +263,17 @@ vdso32.lds
191 vdso32.so.dbg
192 vdso64.lds
193 vdso64.so.dbg
194+vdsox32.lds
195+vdsox32-syms.lds
196 version.h*
197 vmImage
198 vmlinux
199 vmlinux-*
200 vmlinux.aout
201 vmlinux.bin.all
202+vmlinux.bin.bz2
203 vmlinux.lds
204+vmlinux.relocs
205 vmlinuz
206 voffset.h
207 vsyscall.lds
208@@ -252,9 +281,11 @@ vsyscall_32.lds
209 wanxlfw.inc
210 uImage
211 unifdef
212+utsrelease.h
213 wakeup.bin
214 wakeup.elf
215 wakeup.lds
216 zImage*
217 zconf.hash.c
218+zconf.lex.c
219 zoffset.h
220diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
221index c1601e5..08557ce 100644
222--- a/Documentation/kernel-parameters.txt
223+++ b/Documentation/kernel-parameters.txt
224@@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
225 the specified number of seconds. This is to be used if
226 your oopses keep scrolling off the screen.
227
228+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
229+ virtualization environments that don't cope well with the
230+ expand down segment used by UDEREF on X86-32 or the frequent
231+ page table updates on X86-64.
232+
233+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
234+
235 pcbit= [HW,ISDN]
236
237 pcd. [PARIDE]
238diff --git a/Makefile b/Makefile
239index 5d0edcb..121c424 100644
240--- a/Makefile
241+++ b/Makefile
242@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
243
244 HOSTCC = gcc
245 HOSTCXX = g++
246-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
247-HOSTCXXFLAGS = -O2
248+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
249+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
250+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
251
252 # Decide whether to build built-in, modular, or both.
253 # Normally, just do built-in.
254@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
255 # Rules shared between *config targets and build targets
256
257 # Basic helpers built in scripts/
258-PHONY += scripts_basic
259-scripts_basic:
260+PHONY += scripts_basic gcc-plugins
261+scripts_basic: gcc-plugins
262 $(Q)$(MAKE) $(build)=scripts/basic
263 $(Q)rm -f .tmp_quiet_recordmcount
264
265@@ -564,6 +565,60 @@ else
266 KBUILD_CFLAGS += -O2
267 endif
268
269+ifndef DISABLE_PAX_PLUGINS
270+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
271+ifneq ($(PLUGINCC),)
272+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
273+ifndef CONFIG_UML
274+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
275+endif
276+endif
277+ifdef CONFIG_PAX_MEMORY_STACKLEAK
278+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
279+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
280+endif
281+ifdef CONFIG_KALLOCSTAT_PLUGIN
282+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
283+endif
284+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
285+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
286+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
287+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
288+endif
289+ifdef CONFIG_CHECKER_PLUGIN
290+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
291+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
292+endif
293+endif
294+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
295+ifdef CONFIG_PAX_SIZE_OVERFLOW
296+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
297+endif
298+ifdef CONFIG_PAX_LATENT_ENTROPY
299+LATENT_ENTROPY := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so
300+endif
301+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
302+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
303+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY)
304+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
305+export PLUGINCC CONSTIFY_PLUGIN
306+ifeq ($(KBUILD_EXTMOD),)
307+gcc-plugins:
308+ $(Q)$(MAKE) $(build)=tools/gcc
309+else
310+gcc-plugins: ;
311+endif
312+else
313+gcc-plugins:
314+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
315+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
316+else
317+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
318+endif
319+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
320+endif
321+endif
322+
323 include $(srctree)/arch/$(SRCARCH)/Makefile
324
325 ifneq ($(CONFIG_FRAME_WARN),0)
326@@ -708,7 +763,7 @@ export mod_strip_cmd
327
328
329 ifeq ($(KBUILD_EXTMOD),)
330-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
331+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
332
333 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
334 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
335@@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
336
337 # The actual objects are generated when descending,
338 # make sure no implicit rule kicks in
339+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
342
343 # Handle descending into subdirectories listed in $(vmlinux-dirs)
344@@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
345 # Error messages still appears in the original language
346
347 PHONY += $(vmlinux-dirs)
348-$(vmlinux-dirs): prepare scripts
349+$(vmlinux-dirs): gcc-plugins prepare scripts
350 $(Q)$(MAKE) $(build)=$@
351
352 # Store (new) KERNELRELASE string in include/config/kernel.release
353@@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
354 $(Q)$(MAKE) $(build)=.
355
356 # All the preparing..
357+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
358 prepare: prepare0
359
360 # Generate some files
361@@ -1092,6 +1150,8 @@ all: modules
362 # using awk while concatenating to the final file.
363
364 PHONY += modules
365+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
368 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
369 @$(kecho) ' Building modules, stage 2.';
370@@ -1107,7 +1167,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
371
372 # Target to prepare building external modules
373 PHONY += modules_prepare
374-modules_prepare: prepare scripts
375+modules_prepare: gcc-plugins prepare scripts
376
377 # Target to install modules
378 PHONY += modules_install
379@@ -1166,7 +1226,7 @@ CLEAN_FILES += vmlinux System.map \
380 MRPROPER_DIRS += include/config usr/include include/generated \
381 arch/*/include/generated
382 MRPROPER_FILES += .config .config.old .version .old_version \
383- include/linux/version.h \
384+ include/linux/version.h tools/gcc/size_overflow_hash.h\
385 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
386
387 # clean - Delete most, but leave enough to build external modules
388@@ -1204,6 +1264,7 @@ distclean: mrproper
389 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
390 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
391 -o -name '.*.rej' \
392+ -o -name '.*.rej' -o -name '*.so' \
393 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
394 -type f -print | xargs rm -f
395
396@@ -1364,6 +1425,8 @@ PHONY += $(module-dirs) modules
397 $(module-dirs): crmodverdir $(objtree)/Module.symvers
398 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
399
400+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 modules: $(module-dirs)
403 @$(kecho) ' Building modules, stage 2.';
404 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
405@@ -1490,17 +1553,21 @@ else
406 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
407 endif
408
409-%.s: %.c prepare scripts FORCE
410+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
411+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
412+%.s: %.c gcc-plugins prepare scripts FORCE
413 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
414 %.i: %.c prepare scripts FORCE
415 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
416-%.o: %.c prepare scripts FORCE
417+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
418+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
419+%.o: %.c gcc-plugins prepare scripts FORCE
420 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
421 %.lst: %.c prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423-%.s: %.S prepare scripts FORCE
424+%.s: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426-%.o: %.S prepare scripts FORCE
427+%.o: %.S gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.symtypes: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431@@ -1510,11 +1577,15 @@ endif
432 $(cmd_crmodverdir)
433 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
434 $(build)=$(build-dir)
435-%/: prepare scripts FORCE
436+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438+%/: gcc-plugins prepare scripts FORCE
439 $(cmd_crmodverdir)
440 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
441 $(build)=$(build-dir)
442-%.ko: prepare scripts FORCE
443+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
444+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
445+%.ko: gcc-plugins prepare scripts FORCE
446 $(cmd_crmodverdir)
447 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
448 $(build)=$(build-dir) $(@:.ko=.o)
449diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
450index 3bb7ffe..347a54c 100644
451--- a/arch/alpha/include/asm/atomic.h
452+++ b/arch/alpha/include/asm/atomic.h
453@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
454 #define atomic_dec(v) atomic_sub(1,(v))
455 #define atomic64_dec(v) atomic64_sub(1,(v))
456
457+#define atomic64_read_unchecked(v) atomic64_read(v)
458+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
459+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
460+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
461+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
462+#define atomic64_inc_unchecked(v) atomic64_inc(v)
463+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
464+#define atomic64_dec_unchecked(v) atomic64_dec(v)
465+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
466+
467 #define smp_mb__before_atomic_dec() smp_mb()
468 #define smp_mb__after_atomic_dec() smp_mb()
469 #define smp_mb__before_atomic_inc() smp_mb()
470diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
471index ad368a9..fbe0f25 100644
472--- a/arch/alpha/include/asm/cache.h
473+++ b/arch/alpha/include/asm/cache.h
474@@ -4,19 +4,19 @@
475 #ifndef __ARCH_ALPHA_CACHE_H
476 #define __ARCH_ALPHA_CACHE_H
477
478+#include <linux/const.h>
479
480 /* Bytes per L1 (data) cache line. */
481 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
482-# define L1_CACHE_BYTES 64
483 # define L1_CACHE_SHIFT 6
484 #else
485 /* Both EV4 and EV5 are write-through, read-allocate,
486 direct-mapped, physical.
487 */
488-# define L1_CACHE_BYTES 32
489 # define L1_CACHE_SHIFT 5
490 #endif
491
492+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
493 #define SMP_CACHE_BYTES L1_CACHE_BYTES
494
495 #endif
496diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
497index 968d999..d36b2df 100644
498--- a/arch/alpha/include/asm/elf.h
499+++ b/arch/alpha/include/asm/elf.h
500@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
501
502 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
503
504+#ifdef CONFIG_PAX_ASLR
505+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
506+
507+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
508+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
509+#endif
510+
511 /* $0 is set by ld.so to a pointer to a function which might be
512 registered using atexit. This provides a mean for the dynamic
513 linker to call DT_FINI functions for shared libraries that have
514diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
515index bc2a0da..8ad11ee 100644
516--- a/arch/alpha/include/asm/pgalloc.h
517+++ b/arch/alpha/include/asm/pgalloc.h
518@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
519 pgd_set(pgd, pmd);
520 }
521
522+static inline void
523+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
524+{
525+ pgd_populate(mm, pgd, pmd);
526+}
527+
528 extern pgd_t *pgd_alloc(struct mm_struct *mm);
529
530 static inline void
531diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
532index 81a4342..348b927 100644
533--- a/arch/alpha/include/asm/pgtable.h
534+++ b/arch/alpha/include/asm/pgtable.h
535@@ -102,6 +102,17 @@ struct vm_area_struct;
536 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
537 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
538 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
539+
540+#ifdef CONFIG_PAX_PAGEEXEC
541+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
542+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
543+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
544+#else
545+# define PAGE_SHARED_NOEXEC PAGE_SHARED
546+# define PAGE_COPY_NOEXEC PAGE_COPY
547+# define PAGE_READONLY_NOEXEC PAGE_READONLY
548+#endif
549+
550 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
551
552 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
553diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
554index 2fd00b7..cfd5069 100644
555--- a/arch/alpha/kernel/module.c
556+++ b/arch/alpha/kernel/module.c
557@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
558
559 /* The small sections were sorted to the end of the segment.
560 The following should definitely cover them. */
561- gp = (u64)me->module_core + me->core_size - 0x8000;
562+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
563 got = sechdrs[me->arch.gotsecindex].sh_addr;
564
565 for (i = 0; i < n; i++) {
566diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
567index 49ee319..9ee7d14 100644
568--- a/arch/alpha/kernel/osf_sys.c
569+++ b/arch/alpha/kernel/osf_sys.c
570@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
571 /* At this point: (!vma || addr < vma->vm_end). */
572 if (limit - len < addr)
573 return -ENOMEM;
574- if (!vma || addr + len <= vma->vm_start)
575+ if (check_heap_stack_gap(vma, addr, len))
576 return addr;
577 addr = vma->vm_end;
578 vma = vma->vm_next;
579@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
580 merely specific addresses, but regions of memory -- perhaps
581 this feature should be incorporated into all ports? */
582
583+#ifdef CONFIG_PAX_RANDMMAP
584+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
585+#endif
586+
587 if (addr) {
588 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
589 if (addr != (unsigned long) -ENOMEM)
590@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
591 }
592
593 /* Next, try allocating at TASK_UNMAPPED_BASE. */
594- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
595- len, limit);
596+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
597+
598 if (addr != (unsigned long) -ENOMEM)
599 return addr;
600
601diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
602index 5eecab1..609abc0 100644
603--- a/arch/alpha/mm/fault.c
604+++ b/arch/alpha/mm/fault.c
605@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
606 __reload_thread(pcb);
607 }
608
609+#ifdef CONFIG_PAX_PAGEEXEC
610+/*
611+ * PaX: decide what to do with offenders (regs->pc = fault address)
612+ *
613+ * returns 1 when task should be killed
614+ * 2 when patched PLT trampoline was detected
615+ * 3 when unpatched PLT trampoline was detected
616+ */
617+static int pax_handle_fetch_fault(struct pt_regs *regs)
618+{
619+
620+#ifdef CONFIG_PAX_EMUPLT
621+ int err;
622+
623+ do { /* PaX: patched PLT emulation #1 */
624+ unsigned int ldah, ldq, jmp;
625+
626+ err = get_user(ldah, (unsigned int *)regs->pc);
627+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
628+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
629+
630+ if (err)
631+ break;
632+
633+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
634+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
635+ jmp == 0x6BFB0000U)
636+ {
637+ unsigned long r27, addr;
638+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
639+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
640+
641+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
642+ err = get_user(r27, (unsigned long *)addr);
643+ if (err)
644+ break;
645+
646+ regs->r27 = r27;
647+ regs->pc = r27;
648+ return 2;
649+ }
650+ } while (0);
651+
652+ do { /* PaX: patched PLT emulation #2 */
653+ unsigned int ldah, lda, br;
654+
655+ err = get_user(ldah, (unsigned int *)regs->pc);
656+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
657+ err |= get_user(br, (unsigned int *)(regs->pc+8));
658+
659+ if (err)
660+ break;
661+
662+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
663+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
664+ (br & 0xFFE00000U) == 0xC3E00000U)
665+ {
666+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
667+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
668+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
669+
670+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
671+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
672+ return 2;
673+ }
674+ } while (0);
675+
676+ do { /* PaX: unpatched PLT emulation */
677+ unsigned int br;
678+
679+ err = get_user(br, (unsigned int *)regs->pc);
680+
681+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
682+ unsigned int br2, ldq, nop, jmp;
683+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
684+
685+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
686+ err = get_user(br2, (unsigned int *)addr);
687+ err |= get_user(ldq, (unsigned int *)(addr+4));
688+ err |= get_user(nop, (unsigned int *)(addr+8));
689+ err |= get_user(jmp, (unsigned int *)(addr+12));
690+ err |= get_user(resolver, (unsigned long *)(addr+16));
691+
692+ if (err)
693+ break;
694+
695+ if (br2 == 0xC3600000U &&
696+ ldq == 0xA77B000CU &&
697+ nop == 0x47FF041FU &&
698+ jmp == 0x6B7B0000U)
699+ {
700+ regs->r28 = regs->pc+4;
701+ regs->r27 = addr+16;
702+ regs->pc = resolver;
703+ return 3;
704+ }
705+ }
706+ } while (0);
707+#endif
708+
709+ return 1;
710+}
711+
712+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
713+{
714+ unsigned long i;
715+
716+ printk(KERN_ERR "PAX: bytes at PC: ");
717+ for (i = 0; i < 5; i++) {
718+ unsigned int c;
719+ if (get_user(c, (unsigned int *)pc+i))
720+ printk(KERN_CONT "???????? ");
721+ else
722+ printk(KERN_CONT "%08x ", c);
723+ }
724+ printk("\n");
725+}
726+#endif
727
728 /*
729 * This routine handles page faults. It determines the address,
730@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
731 good_area:
732 si_code = SEGV_ACCERR;
733 if (cause < 0) {
734- if (!(vma->vm_flags & VM_EXEC))
735+ if (!(vma->vm_flags & VM_EXEC)) {
736+
737+#ifdef CONFIG_PAX_PAGEEXEC
738+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
739+ goto bad_area;
740+
741+ up_read(&mm->mmap_sem);
742+ switch (pax_handle_fetch_fault(regs)) {
743+
744+#ifdef CONFIG_PAX_EMUPLT
745+ case 2:
746+ case 3:
747+ return;
748+#endif
749+
750+ }
751+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
752+ do_group_exit(SIGKILL);
753+#else
754 goto bad_area;
755+#endif
756+
757+ }
758 } else if (!cause) {
759 /* Allow reads even for write-only mappings */
760 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
761diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
762index 68374ba..cff7196 100644
763--- a/arch/arm/include/asm/atomic.h
764+++ b/arch/arm/include/asm/atomic.h
765@@ -17,17 +17,35 @@
766 #include <asm/barrier.h>
767 #include <asm/cmpxchg.h>
768
769+#ifdef CONFIG_GENERIC_ATOMIC64
770+#include <asm-generic/atomic64.h>
771+#endif
772+
773 #define ATOMIC_INIT(i) { (i) }
774
775 #ifdef __KERNEL__
776
777+#define _ASM_EXTABLE(from, to) \
778+" .pushsection __ex_table,\"a\"\n"\
779+" .align 3\n" \
780+" .long " #from ", " #to"\n" \
781+" .popsection"
782+
783 /*
784 * On ARM, ordinary assignment (str instruction) doesn't clear the local
785 * strex/ldrex monitor on some implementations. The reason we can use it for
786 * atomic_set() is the clrex or dummy strex done on every exception return.
787 */
788 #define atomic_read(v) (*(volatile int *)&(v)->counter)
789+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
790+{
791+ return v->counter;
792+}
793 #define atomic_set(v,i) (((v)->counter) = (i))
794+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
795+{
796+ v->counter = i;
797+}
798
799 #if __LINUX_ARM_ARCH__ >= 6
800
801@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
802 int result;
803
804 __asm__ __volatile__("@ atomic_add\n"
805+"1: ldrex %1, [%3]\n"
806+" adds %0, %1, %4\n"
807+
808+#ifdef CONFIG_PAX_REFCOUNT
809+" bvc 3f\n"
810+"2: bkpt 0xf103\n"
811+"3:\n"
812+#endif
813+
814+" strex %1, %0, [%3]\n"
815+" teq %1, #0\n"
816+" bne 1b"
817+
818+#ifdef CONFIG_PAX_REFCOUNT
819+"\n4:\n"
820+ _ASM_EXTABLE(2b, 4b)
821+#endif
822+
823+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
824+ : "r" (&v->counter), "Ir" (i)
825+ : "cc");
826+}
827+
828+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
829+{
830+ unsigned long tmp;
831+ int result;
832+
833+ __asm__ __volatile__("@ atomic_add_unchecked\n"
834 "1: ldrex %0, [%3]\n"
835 " add %0, %0, %4\n"
836 " strex %1, %0, [%3]\n"
837@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
838 smp_mb();
839
840 __asm__ __volatile__("@ atomic_add_return\n"
841+"1: ldrex %1, [%3]\n"
842+" adds %0, %1, %4\n"
843+
844+#ifdef CONFIG_PAX_REFCOUNT
845+" bvc 3f\n"
846+" mov %0, %1\n"
847+"2: bkpt 0xf103\n"
848+"3:\n"
849+#endif
850+
851+" strex %1, %0, [%3]\n"
852+" teq %1, #0\n"
853+" bne 1b"
854+
855+#ifdef CONFIG_PAX_REFCOUNT
856+"\n4:\n"
857+ _ASM_EXTABLE(2b, 4b)
858+#endif
859+
860+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
861+ : "r" (&v->counter), "Ir" (i)
862+ : "cc");
863+
864+ smp_mb();
865+
866+ return result;
867+}
868+
869+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
870+{
871+ unsigned long tmp;
872+ int result;
873+
874+ smp_mb();
875+
876+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
877 "1: ldrex %0, [%3]\n"
878 " add %0, %0, %4\n"
879 " strex %1, %0, [%3]\n"
880@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
881 int result;
882
883 __asm__ __volatile__("@ atomic_sub\n"
884+"1: ldrex %1, [%3]\n"
885+" subs %0, %1, %4\n"
886+
887+#ifdef CONFIG_PAX_REFCOUNT
888+" bvc 3f\n"
889+"2: bkpt 0xf103\n"
890+"3:\n"
891+#endif
892+
893+" strex %1, %0, [%3]\n"
894+" teq %1, #0\n"
895+" bne 1b"
896+
897+#ifdef CONFIG_PAX_REFCOUNT
898+"\n4:\n"
899+ _ASM_EXTABLE(2b, 4b)
900+#endif
901+
902+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
903+ : "r" (&v->counter), "Ir" (i)
904+ : "cc");
905+}
906+
907+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
908+{
909+ unsigned long tmp;
910+ int result;
911+
912+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
913 "1: ldrex %0, [%3]\n"
914 " sub %0, %0, %4\n"
915 " strex %1, %0, [%3]\n"
916@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
917 smp_mb();
918
919 __asm__ __volatile__("@ atomic_sub_return\n"
920-"1: ldrex %0, [%3]\n"
921-" sub %0, %0, %4\n"
922+"1: ldrex %1, [%3]\n"
923+" sub %0, %1, %4\n"
924+
925+#ifdef CONFIG_PAX_REFCOUNT
926+" bvc 3f\n"
927+" mov %0, %1\n"
928+"2: bkpt 0xf103\n"
929+"3:\n"
930+#endif
931+
932 " strex %1, %0, [%3]\n"
933 " teq %1, #0\n"
934 " bne 1b"
935+
936+#ifdef CONFIG_PAX_REFCOUNT
937+"\n4:\n"
938+ _ASM_EXTABLE(2b, 4b)
939+#endif
940+
941 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 : "r" (&v->counter), "Ir" (i)
943 : "cc");
944@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
945 return oldval;
946 }
947
948+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
949+{
950+ unsigned long oldval, res;
951+
952+ smp_mb();
953+
954+ do {
955+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
956+ "ldrex %1, [%3]\n"
957+ "mov %0, #0\n"
958+ "teq %1, %4\n"
959+ "strexeq %0, %5, [%3]\n"
960+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
961+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
962+ : "cc");
963+ } while (res);
964+
965+ smp_mb();
966+
967+ return oldval;
968+}
969+
970 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 {
972 unsigned long tmp, tmp2;
973@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
974
975 return val;
976 }
977+
978+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
979+{
980+ return atomic_add_return(i, v);
981+}
982+
983 #define atomic_add(i, v) (void) atomic_add_return(i, v)
984+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
985+{
986+ (void) atomic_add_return(i, v);
987+}
988
989 static inline int atomic_sub_return(int i, atomic_t *v)
990 {
991@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
992 return val;
993 }
994 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
995+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
996+{
997+ (void) atomic_sub_return(i, v);
998+}
999
1000 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1001 {
1002@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1003 return ret;
1004 }
1005
1006+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1007+{
1008+ return atomic_cmpxchg(v, old, new);
1009+}
1010+
1011 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1012 {
1013 unsigned long flags;
1014@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1015 #endif /* __LINUX_ARM_ARCH__ */
1016
1017 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1018+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1019+{
1020+ return xchg(&v->counter, new);
1021+}
1022
1023 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1024 {
1025@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1026 }
1027
1028 #define atomic_inc(v) atomic_add(1, v)
1029+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1030+{
1031+ atomic_add_unchecked(1, v);
1032+}
1033 #define atomic_dec(v) atomic_sub(1, v)
1034+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1035+{
1036+ atomic_sub_unchecked(1, v);
1037+}
1038
1039 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1040+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1041+{
1042+ return atomic_add_return_unchecked(1, v) == 0;
1043+}
1044 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1045 #define atomic_inc_return(v) (atomic_add_return(1, v))
1046+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1047+{
1048+ return atomic_add_return_unchecked(1, v);
1049+}
1050 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1051 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1052
1053@@ -241,6 +428,14 @@ typedef struct {
1054 u64 __aligned(8) counter;
1055 } atomic64_t;
1056
1057+#ifdef CONFIG_PAX_REFCOUNT
1058+typedef struct {
1059+ u64 __aligned(8) counter;
1060+} atomic64_unchecked_t;
1061+#else
1062+typedef atomic64_t atomic64_unchecked_t;
1063+#endif
1064+
1065 #define ATOMIC64_INIT(i) { (i) }
1066
1067 static inline u64 atomic64_read(atomic64_t *v)
1068@@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1069 return result;
1070 }
1071
1072+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1073+{
1074+ u64 result;
1075+
1076+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1077+" ldrexd %0, %H0, [%1]"
1078+ : "=&r" (result)
1079+ : "r" (&v->counter), "Qo" (v->counter)
1080+ );
1081+
1082+ return result;
1083+}
1084+
1085 static inline void atomic64_set(atomic64_t *v, u64 i)
1086 {
1087 u64 tmp;
1088@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1089 : "cc");
1090 }
1091
1092+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1093+{
1094+ u64 tmp;
1095+
1096+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1097+"1: ldrexd %0, %H0, [%2]\n"
1098+" strexd %0, %3, %H3, [%2]\n"
1099+" teq %0, #0\n"
1100+" bne 1b"
1101+ : "=&r" (tmp), "=Qo" (v->counter)
1102+ : "r" (&v->counter), "r" (i)
1103+ : "cc");
1104+}
1105+
1106 static inline void atomic64_add(u64 i, atomic64_t *v)
1107 {
1108 u64 result;
1109@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1110 __asm__ __volatile__("@ atomic64_add\n"
1111 "1: ldrexd %0, %H0, [%3]\n"
1112 " adds %0, %0, %4\n"
1113+" adcs %H0, %H0, %H4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: bkpt 0xf103\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strexd %1, %0, %H0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "r" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1136+{
1137+ u64 result;
1138+ unsigned long tmp;
1139+
1140+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1141+"1: ldrexd %0, %H0, [%3]\n"
1142+" adds %0, %0, %4\n"
1143 " adc %H0, %H0, %H4\n"
1144 " strexd %1, %0, %H0, [%3]\n"
1145 " teq %1, #0\n"
1146@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1147
1148 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1149 {
1150- u64 result;
1151- unsigned long tmp;
1152+ u64 result, tmp;
1153
1154 smp_mb();
1155
1156 __asm__ __volatile__("@ atomic64_add_return\n"
1157+"1: ldrexd %1, %H1, [%3]\n"
1158+" adds %0, %1, %4\n"
1159+" adcs %H0, %H1, %H4\n"
1160+
1161+#ifdef CONFIG_PAX_REFCOUNT
1162+" bvc 3f\n"
1163+" mov %0, %1\n"
1164+" mov %H0, %H1\n"
1165+"2: bkpt 0xf103\n"
1166+"3:\n"
1167+#endif
1168+
1169+" strexd %1, %0, %H0, [%3]\n"
1170+" teq %1, #0\n"
1171+" bne 1b"
1172+
1173+#ifdef CONFIG_PAX_REFCOUNT
1174+"\n4:\n"
1175+ _ASM_EXTABLE(2b, 4b)
1176+#endif
1177+
1178+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1179+ : "r" (&v->counter), "r" (i)
1180+ : "cc");
1181+
1182+ smp_mb();
1183+
1184+ return result;
1185+}
1186+
1187+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1188+{
1189+ u64 result;
1190+ unsigned long tmp;
1191+
1192+ smp_mb();
1193+
1194+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1195 "1: ldrexd %0, %H0, [%3]\n"
1196 " adds %0, %0, %4\n"
1197 " adc %H0, %H0, %H4\n"
1198@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1199 __asm__ __volatile__("@ atomic64_sub\n"
1200 "1: ldrexd %0, %H0, [%3]\n"
1201 " subs %0, %0, %4\n"
1202+" sbcs %H0, %H0, %H4\n"
1203+
1204+#ifdef CONFIG_PAX_REFCOUNT
1205+" bvc 3f\n"
1206+"2: bkpt 0xf103\n"
1207+"3:\n"
1208+#endif
1209+
1210+" strexd %1, %0, %H0, [%3]\n"
1211+" teq %1, #0\n"
1212+" bne 1b"
1213+
1214+#ifdef CONFIG_PAX_REFCOUNT
1215+"\n4:\n"
1216+ _ASM_EXTABLE(2b, 4b)
1217+#endif
1218+
1219+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1220+ : "r" (&v->counter), "r" (i)
1221+ : "cc");
1222+}
1223+
1224+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1225+{
1226+ u64 result;
1227+ unsigned long tmp;
1228+
1229+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1230+"1: ldrexd %0, %H0, [%3]\n"
1231+" subs %0, %0, %4\n"
1232 " sbc %H0, %H0, %H4\n"
1233 " strexd %1, %0, %H0, [%3]\n"
1234 " teq %1, #0\n"
1235@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1236
1237 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1238 {
1239- u64 result;
1240- unsigned long tmp;
1241+ u64 result, tmp;
1242
1243 smp_mb();
1244
1245 __asm__ __volatile__("@ atomic64_sub_return\n"
1246-"1: ldrexd %0, %H0, [%3]\n"
1247-" subs %0, %0, %4\n"
1248-" sbc %H0, %H0, %H4\n"
1249+"1: ldrexd %1, %H1, [%3]\n"
1250+" subs %0, %1, %4\n"
1251+" sbc %H0, %H1, %H4\n"
1252+
1253+#ifdef CONFIG_PAX_REFCOUNT
1254+" bvc 3f\n"
1255+" mov %0, %1\n"
1256+" mov %H0, %H1\n"
1257+"2: bkpt 0xf103\n"
1258+"3:\n"
1259+#endif
1260+
1261 " strexd %1, %0, %H0, [%3]\n"
1262 " teq %1, #0\n"
1263 " bne 1b"
1264+
1265+#ifdef CONFIG_PAX_REFCOUNT
1266+"\n4:\n"
1267+ _ASM_EXTABLE(2b, 4b)
1268+#endif
1269+
1270 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1271 : "r" (&v->counter), "r" (i)
1272 : "cc");
1273@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1274 return oldval;
1275 }
1276
1277+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1278+{
1279+ u64 oldval;
1280+ unsigned long res;
1281+
1282+ smp_mb();
1283+
1284+ do {
1285+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1286+ "ldrexd %1, %H1, [%3]\n"
1287+ "mov %0, #0\n"
1288+ "teq %1, %4\n"
1289+ "teqeq %H1, %H4\n"
1290+ "strexdeq %0, %5, %H5, [%3]"
1291+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1292+ : "r" (&ptr->counter), "r" (old), "r" (new)
1293+ : "cc");
1294+ } while (res);
1295+
1296+ smp_mb();
1297+
1298+ return oldval;
1299+}
1300+
1301 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1302 {
1303 u64 result;
1304@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1305
1306 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1307 {
1308- u64 result;
1309- unsigned long tmp;
1310+ u64 result, tmp;
1311
1312 smp_mb();
1313
1314 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1315-"1: ldrexd %0, %H0, [%3]\n"
1316-" subs %0, %0, #1\n"
1317-" sbc %H0, %H0, #0\n"
1318+"1: ldrexd %1, %H1, [%3]\n"
1319+" subs %0, %1, #1\n"
1320+" sbc %H0, %H1, #0\n"
1321+
1322+#ifdef CONFIG_PAX_REFCOUNT
1323+" bvc 3f\n"
1324+" mov %0, %1\n"
1325+" mov %H0, %H1\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330 " teq %H0, #0\n"
1331-" bmi 2f\n"
1332+" bmi 4f\n"
1333 " strexd %1, %0, %H0, [%3]\n"
1334 " teq %1, #0\n"
1335 " bne 1b\n"
1336-"2:"
1337+"4:\n"
1338+
1339+#ifdef CONFIG_PAX_REFCOUNT
1340+ _ASM_EXTABLE(2b, 4b)
1341+#endif
1342+
1343 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1344 : "r" (&v->counter)
1345 : "cc");
1346@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1347 " teq %0, %5\n"
1348 " teqeq %H0, %H5\n"
1349 " moveq %1, #0\n"
1350-" beq 2f\n"
1351+" beq 4f\n"
1352 " adds %0, %0, %6\n"
1353 " adc %H0, %H0, %H6\n"
1354+
1355+#ifdef CONFIG_PAX_REFCOUNT
1356+" bvc 3f\n"
1357+"2: bkpt 0xf103\n"
1358+"3:\n"
1359+#endif
1360+
1361 " strexd %2, %0, %H0, [%4]\n"
1362 " teq %2, #0\n"
1363 " bne 1b\n"
1364-"2:"
1365+"4:\n"
1366+
1367+#ifdef CONFIG_PAX_REFCOUNT
1368+ _ASM_EXTABLE(2b, 4b)
1369+#endif
1370+
1371 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1372 : "r" (&v->counter), "r" (u), "r" (a)
1373 : "cc");
1374@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1375
1376 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1377 #define atomic64_inc(v) atomic64_add(1LL, (v))
1378+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1379 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1380+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1381 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1382 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1383 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1384+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1385 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1386 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1387 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1388diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1389index 75fe66b..2255c86 100644
1390--- a/arch/arm/include/asm/cache.h
1391+++ b/arch/arm/include/asm/cache.h
1392@@ -4,8 +4,10 @@
1393 #ifndef __ASMARM_CACHE_H
1394 #define __ASMARM_CACHE_H
1395
1396+#include <linux/const.h>
1397+
1398 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1399-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1400+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1401
1402 /*
1403 * Memory returned by kmalloc() may be used for DMA, so we must make
1404diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1405index 1252a26..9dc17b5 100644
1406--- a/arch/arm/include/asm/cacheflush.h
1407+++ b/arch/arm/include/asm/cacheflush.h
1408@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1409 void (*dma_unmap_area)(const void *, size_t, int);
1410
1411 void (*dma_flush_range)(const void *, const void *);
1412-};
1413+} __no_const;
1414
1415 /*
1416 * Select the calling method
1417diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1418index d41d7cb..9bea5e0 100644
1419--- a/arch/arm/include/asm/cmpxchg.h
1420+++ b/arch/arm/include/asm/cmpxchg.h
1421@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1422
1423 #define xchg(ptr,x) \
1424 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1425+#define xchg_unchecked(ptr,x) \
1426+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1427
1428 #include <asm-generic/cmpxchg-local.h>
1429
1430diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1431index 38050b1..9d90e8b 100644
1432--- a/arch/arm/include/asm/elf.h
1433+++ b/arch/arm/include/asm/elf.h
1434@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1435 the loader. We need to make sure that it is out of the way of the program
1436 that it will "exec", and that there is sufficient room for the brk. */
1437
1438-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1439+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1440+
1441+#ifdef CONFIG_PAX_ASLR
1442+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1443+
1444+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1445+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1446+#endif
1447
1448 /* When the program starts, a1 contains a pointer to a function to be
1449 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1450@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1451 extern void elf_set_personality(const struct elf32_hdr *);
1452 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1453
1454-struct mm_struct;
1455-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1456-#define arch_randomize_brk arch_randomize_brk
1457-
1458 #endif
1459diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1460index e51b1e8..32a3113 100644
1461--- a/arch/arm/include/asm/kmap_types.h
1462+++ b/arch/arm/include/asm/kmap_types.h
1463@@ -21,6 +21,7 @@ enum km_type {
1464 KM_L1_CACHE,
1465 KM_L2_CACHE,
1466 KM_KDB,
1467+ KM_CLEARPAGE,
1468 KM_TYPE_NR
1469 };
1470
1471diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1472index 53426c6..c7baff3 100644
1473--- a/arch/arm/include/asm/outercache.h
1474+++ b/arch/arm/include/asm/outercache.h
1475@@ -35,7 +35,7 @@ struct outer_cache_fns {
1476 #endif
1477 void (*set_debug)(unsigned long);
1478 void (*resume)(void);
1479-};
1480+} __no_const;
1481
1482 #ifdef CONFIG_OUTER_CACHE
1483
1484diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1485index 5838361..da6e813 100644
1486--- a/arch/arm/include/asm/page.h
1487+++ b/arch/arm/include/asm/page.h
1488@@ -123,7 +123,7 @@ struct cpu_user_fns {
1489 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1490 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1491 unsigned long vaddr, struct vm_area_struct *vma);
1492-};
1493+} __no_const;
1494
1495 #ifdef MULTI_USER
1496 extern struct cpu_user_fns cpu_user;
1497diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1498index 943504f..bf8d667 100644
1499--- a/arch/arm/include/asm/pgalloc.h
1500+++ b/arch/arm/include/asm/pgalloc.h
1501@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1502 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1503 }
1504
1505+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1506+{
1507+ pud_populate(mm, pud, pmd);
1508+}
1509+
1510 #else /* !CONFIG_ARM_LPAE */
1511
1512 /*
1513@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1514 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1515 #define pmd_free(mm, pmd) do { } while (0)
1516 #define pud_populate(mm,pmd,pte) BUG()
1517+#define pud_populate_kernel(mm,pmd,pte) BUG()
1518
1519 #endif /* CONFIG_ARM_LPAE */
1520
1521diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1522index 0f04d84..2be5648 100644
1523--- a/arch/arm/include/asm/thread_info.h
1524+++ b/arch/arm/include/asm/thread_info.h
1525@@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1526 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1527 #define TIF_SYSCALL_TRACE 8
1528 #define TIF_SYSCALL_AUDIT 9
1529+
1530+/* within 8 bits of TIF_SYSCALL_TRACE
1531+ to meet flexible second operand requirements
1532+*/
1533+#define TIF_GRSEC_SETXID 10
1534+
1535 #define TIF_POLLING_NRFLAG 16
1536 #define TIF_USING_IWMMXT 17
1537 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1538@@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1539 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1540 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1541 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1542+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1543
1544 /* Checks for any syscall work in entry-common.S */
1545-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1546+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1547+ _TIF_GRSEC_SETXID)
1548
1549 /*
1550 * Change these and you break ASM code in entry-common.S
1551diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1552index 71f6536..602f279 100644
1553--- a/arch/arm/include/asm/uaccess.h
1554+++ b/arch/arm/include/asm/uaccess.h
1555@@ -22,6 +22,8 @@
1556 #define VERIFY_READ 0
1557 #define VERIFY_WRITE 1
1558
1559+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1560+
1561 /*
1562 * The exception table consists of pairs of addresses: the first is the
1563 * address of an instruction that is allowed to fault, and the second is
1564@@ -387,8 +389,23 @@ do { \
1565
1566
1567 #ifdef CONFIG_MMU
1568-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1569-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1570+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1571+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1572+
1573+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1574+{
1575+ if (!__builtin_constant_p(n))
1576+ check_object_size(to, n, false);
1577+ return ___copy_from_user(to, from, n);
1578+}
1579+
1580+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1581+{
1582+ if (!__builtin_constant_p(n))
1583+ check_object_size(from, n, true);
1584+ return ___copy_to_user(to, from, n);
1585+}
1586+
1587 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1588 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1589 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1590@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1591
1592 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1593 {
1594+ if ((long)n < 0)
1595+ return n;
1596+
1597 if (access_ok(VERIFY_READ, from, n))
1598 n = __copy_from_user(to, from, n);
1599 else /* security hole - plug it */
1600@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1601
1602 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1603 {
1604+ if ((long)n < 0)
1605+ return n;
1606+
1607 if (access_ok(VERIFY_WRITE, to, n))
1608 n = __copy_to_user(to, from, n);
1609 return n;
1610diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1611index b57c75e..ed2d6b2 100644
1612--- a/arch/arm/kernel/armksyms.c
1613+++ b/arch/arm/kernel/armksyms.c
1614@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1615 #ifdef CONFIG_MMU
1616 EXPORT_SYMBOL(copy_page);
1617
1618-EXPORT_SYMBOL(__copy_from_user);
1619-EXPORT_SYMBOL(__copy_to_user);
1620+EXPORT_SYMBOL(___copy_from_user);
1621+EXPORT_SYMBOL(___copy_to_user);
1622 EXPORT_SYMBOL(__clear_user);
1623
1624 EXPORT_SYMBOL(__get_user_1);
1625diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1626index 2b7b017..c380fa2 100644
1627--- a/arch/arm/kernel/process.c
1628+++ b/arch/arm/kernel/process.c
1629@@ -28,7 +28,6 @@
1630 #include <linux/tick.h>
1631 #include <linux/utsname.h>
1632 #include <linux/uaccess.h>
1633-#include <linux/random.h>
1634 #include <linux/hw_breakpoint.h>
1635 #include <linux/cpuidle.h>
1636
1637@@ -275,9 +274,10 @@ void machine_power_off(void)
1638 machine_shutdown();
1639 if (pm_power_off)
1640 pm_power_off();
1641+ BUG();
1642 }
1643
1644-void machine_restart(char *cmd)
1645+__noreturn void machine_restart(char *cmd)
1646 {
1647 machine_shutdown();
1648
1649@@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1650 return 0;
1651 }
1652
1653-unsigned long arch_randomize_brk(struct mm_struct *mm)
1654-{
1655- unsigned long range_end = mm->brk + 0x02000000;
1656- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1657-}
1658-
1659 #ifdef CONFIG_MMU
1660 /*
1661 * The vectors page is always readable from user space for the
1662diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1663index 9650c14..ae30cdd 100644
1664--- a/arch/arm/kernel/ptrace.c
1665+++ b/arch/arm/kernel/ptrace.c
1666@@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1667 return ret;
1668 }
1669
1670+#ifdef CONFIG_GRKERNSEC_SETXID
1671+extern void gr_delayed_cred_worker(void);
1672+#endif
1673+
1674 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1675 {
1676 unsigned long ip;
1677
1678+#ifdef CONFIG_GRKERNSEC_SETXID
1679+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1680+ gr_delayed_cred_worker();
1681+#endif
1682+
1683 if (why)
1684 audit_syscall_exit(regs);
1685 else
1686diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1687index ebfac78..cbea9c0 100644
1688--- a/arch/arm/kernel/setup.c
1689+++ b/arch/arm/kernel/setup.c
1690@@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1691 struct cpu_tlb_fns cpu_tlb __read_mostly;
1692 #endif
1693 #ifdef MULTI_USER
1694-struct cpu_user_fns cpu_user __read_mostly;
1695+struct cpu_user_fns cpu_user __read_only;
1696 #endif
1697 #ifdef MULTI_CACHE
1698-struct cpu_cache_fns cpu_cache __read_mostly;
1699+struct cpu_cache_fns cpu_cache __read_only;
1700 #endif
1701 #ifdef CONFIG_OUTER_CACHE
1702-struct outer_cache_fns outer_cache __read_mostly;
1703+struct outer_cache_fns outer_cache __read_only;
1704 EXPORT_SYMBOL(outer_cache);
1705 #endif
1706
1707diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1708index 63d402f..db1d714 100644
1709--- a/arch/arm/kernel/traps.c
1710+++ b/arch/arm/kernel/traps.c
1711@@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1712
1713 static DEFINE_RAW_SPINLOCK(die_lock);
1714
1715+extern void gr_handle_kernel_exploit(void);
1716+
1717 /*
1718 * This function is protected against re-entrancy.
1719 */
1720@@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1721 panic("Fatal exception in interrupt");
1722 if (panic_on_oops)
1723 panic("Fatal exception");
1724+
1725+ gr_handle_kernel_exploit();
1726+
1727 if (ret != NOTIFY_STOP)
1728 do_exit(SIGSEGV);
1729 }
1730diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1731index 66a477a..bee61d3 100644
1732--- a/arch/arm/lib/copy_from_user.S
1733+++ b/arch/arm/lib/copy_from_user.S
1734@@ -16,7 +16,7 @@
1735 /*
1736 * Prototype:
1737 *
1738- * size_t __copy_from_user(void *to, const void *from, size_t n)
1739+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1740 *
1741 * Purpose:
1742 *
1743@@ -84,11 +84,11 @@
1744
1745 .text
1746
1747-ENTRY(__copy_from_user)
1748+ENTRY(___copy_from_user)
1749
1750 #include "copy_template.S"
1751
1752-ENDPROC(__copy_from_user)
1753+ENDPROC(___copy_from_user)
1754
1755 .pushsection .fixup,"ax"
1756 .align 0
1757diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1758index 6ee2f67..d1cce76 100644
1759--- a/arch/arm/lib/copy_page.S
1760+++ b/arch/arm/lib/copy_page.S
1761@@ -10,6 +10,7 @@
1762 * ASM optimised string functions
1763 */
1764 #include <linux/linkage.h>
1765+#include <linux/const.h>
1766 #include <asm/assembler.h>
1767 #include <asm/asm-offsets.h>
1768 #include <asm/cache.h>
1769diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1770index d066df6..df28194 100644
1771--- a/arch/arm/lib/copy_to_user.S
1772+++ b/arch/arm/lib/copy_to_user.S
1773@@ -16,7 +16,7 @@
1774 /*
1775 * Prototype:
1776 *
1777- * size_t __copy_to_user(void *to, const void *from, size_t n)
1778+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1779 *
1780 * Purpose:
1781 *
1782@@ -88,11 +88,11 @@
1783 .text
1784
1785 ENTRY(__copy_to_user_std)
1786-WEAK(__copy_to_user)
1787+WEAK(___copy_to_user)
1788
1789 #include "copy_template.S"
1790
1791-ENDPROC(__copy_to_user)
1792+ENDPROC(___copy_to_user)
1793 ENDPROC(__copy_to_user_std)
1794
1795 .pushsection .fixup,"ax"
1796diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1797index 5c908b1..e712687 100644
1798--- a/arch/arm/lib/uaccess.S
1799+++ b/arch/arm/lib/uaccess.S
1800@@ -20,7 +20,7 @@
1801
1802 #define PAGE_SHIFT 12
1803
1804-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1805+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1806 * Purpose : copy a block to user memory from kernel memory
1807 * Params : to - user memory
1808 * : from - kernel memory
1809@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1810 sub r2, r2, ip
1811 b .Lc2u_dest_aligned
1812
1813-ENTRY(__copy_to_user)
1814+ENTRY(___copy_to_user)
1815 stmfd sp!, {r2, r4 - r7, lr}
1816 cmp r2, #4
1817 blt .Lc2u_not_enough
1818@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1819 ldrgtb r3, [r1], #0
1820 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1821 b .Lc2u_finished
1822-ENDPROC(__copy_to_user)
1823+ENDPROC(___copy_to_user)
1824
1825 .pushsection .fixup,"ax"
1826 .align 0
1827 9001: ldmfd sp!, {r0, r4 - r7, pc}
1828 .popsection
1829
1830-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1831+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1832 * Purpose : copy a block from user memory to kernel memory
1833 * Params : to - kernel memory
1834 * : from - user memory
1835@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1836 sub r2, r2, ip
1837 b .Lcfu_dest_aligned
1838
1839-ENTRY(__copy_from_user)
1840+ENTRY(___copy_from_user)
1841 stmfd sp!, {r0, r2, r4 - r7, lr}
1842 cmp r2, #4
1843 blt .Lcfu_not_enough
1844@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1845 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1846 strgtb r3, [r0], #1
1847 b .Lcfu_finished
1848-ENDPROC(__copy_from_user)
1849+ENDPROC(___copy_from_user)
1850
1851 .pushsection .fixup,"ax"
1852 .align 0
1853diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1854index 025f742..8432b08 100644
1855--- a/arch/arm/lib/uaccess_with_memcpy.c
1856+++ b/arch/arm/lib/uaccess_with_memcpy.c
1857@@ -104,7 +104,7 @@ out:
1858 }
1859
1860 unsigned long
1861-__copy_to_user(void __user *to, const void *from, unsigned long n)
1862+___copy_to_user(void __user *to, const void *from, unsigned long n)
1863 {
1864 /*
1865 * This test is stubbed out of the main function above to keep
1866diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1867index 518091c..eae9a76 100644
1868--- a/arch/arm/mach-omap2/board-n8x0.c
1869+++ b/arch/arm/mach-omap2/board-n8x0.c
1870@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1871 }
1872 #endif
1873
1874-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1875+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1876 .late_init = n8x0_menelaus_late_init,
1877 };
1878
1879diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1880index 5bb4835..4760f68 100644
1881--- a/arch/arm/mm/fault.c
1882+++ b/arch/arm/mm/fault.c
1883@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1884 }
1885 #endif
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+ if (fsr & FSR_LNX_PF) {
1889+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1890+ do_group_exit(SIGKILL);
1891+ }
1892+#endif
1893+
1894 tsk->thread.address = addr;
1895 tsk->thread.error_code = fsr;
1896 tsk->thread.trap_no = 14;
1897@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1898 }
1899 #endif /* CONFIG_MMU */
1900
1901+#ifdef CONFIG_PAX_PAGEEXEC
1902+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1903+{
1904+ long i;
1905+
1906+ printk(KERN_ERR "PAX: bytes at PC: ");
1907+ for (i = 0; i < 20; i++) {
1908+ unsigned char c;
1909+ if (get_user(c, (__force unsigned char __user *)pc+i))
1910+ printk(KERN_CONT "?? ");
1911+ else
1912+ printk(KERN_CONT "%02x ", c);
1913+ }
1914+ printk("\n");
1915+
1916+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1917+ for (i = -1; i < 20; i++) {
1918+ unsigned long c;
1919+ if (get_user(c, (__force unsigned long __user *)sp+i))
1920+ printk(KERN_CONT "???????? ");
1921+ else
1922+ printk(KERN_CONT "%08lx ", c);
1923+ }
1924+ printk("\n");
1925+}
1926+#endif
1927+
1928 /*
1929 * First Level Translation Fault Handler
1930 *
1931@@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1932 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1933 struct siginfo info;
1934
1935+#ifdef CONFIG_PAX_REFCOUNT
1936+ if (fsr_fs(ifsr) == 2) {
1937+ unsigned int bkpt;
1938+
1939+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1940+ current->thread.error_code = ifsr;
1941+ current->thread.trap_no = 0;
1942+ pax_report_refcount_overflow(regs);
1943+ fixup_exception(regs);
1944+ return;
1945+ }
1946+ }
1947+#endif
1948+
1949 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1950 return;
1951
1952diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1953index ce8cb19..3ec539d 100644
1954--- a/arch/arm/mm/mmap.c
1955+++ b/arch/arm/mm/mmap.c
1956@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1957 if (len > TASK_SIZE)
1958 return -ENOMEM;
1959
1960+#ifdef CONFIG_PAX_RANDMMAP
1961+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1962+#endif
1963+
1964 if (addr) {
1965 if (do_align)
1966 addr = COLOUR_ALIGN(addr, pgoff);
1967@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1968 addr = PAGE_ALIGN(addr);
1969
1970 vma = find_vma(mm, addr);
1971- if (TASK_SIZE - len >= addr &&
1972- (!vma || addr + len <= vma->vm_start))
1973+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1974 return addr;
1975 }
1976 if (len > mm->cached_hole_size) {
1977- start_addr = addr = mm->free_area_cache;
1978+ start_addr = addr = mm->free_area_cache;
1979 } else {
1980- start_addr = addr = mm->mmap_base;
1981- mm->cached_hole_size = 0;
1982+ start_addr = addr = mm->mmap_base;
1983+ mm->cached_hole_size = 0;
1984 }
1985
1986 full_search:
1987@@ -124,14 +127,14 @@ full_search:
1988 * Start a new search - just in case we missed
1989 * some holes.
1990 */
1991- if (start_addr != TASK_UNMAPPED_BASE) {
1992- start_addr = addr = TASK_UNMAPPED_BASE;
1993+ if (start_addr != mm->mmap_base) {
1994+ start_addr = addr = mm->mmap_base;
1995 mm->cached_hole_size = 0;
1996 goto full_search;
1997 }
1998 return -ENOMEM;
1999 }
2000- if (!vma || addr + len <= vma->vm_start) {
2001+ if (check_heap_stack_gap(vma, addr, len)) {
2002 /*
2003 * Remember the place where we stopped the search:
2004 */
2005@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2006
2007 if (mmap_is_legacy()) {
2008 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2009+
2010+#ifdef CONFIG_PAX_RANDMMAP
2011+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2012+ mm->mmap_base += mm->delta_mmap;
2013+#endif
2014+
2015 mm->get_unmapped_area = arch_get_unmapped_area;
2016 mm->unmap_area = arch_unmap_area;
2017 } else {
2018 mm->mmap_base = mmap_base(random_factor);
2019+
2020+#ifdef CONFIG_PAX_RANDMMAP
2021+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2022+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2023+#endif
2024+
2025 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2026 mm->unmap_area = arch_unmap_area_topdown;
2027 }
2028diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2029index fd556f7..af2e7d2 100644
2030--- a/arch/arm/plat-orion/include/plat/addr-map.h
2031+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2032@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2033 value in bridge_virt_base */
2034 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2035 const int win);
2036-};
2037+} __no_const;
2038
2039 /*
2040 * Information needed to setup one address mapping.
2041diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2042index 71a6827..e7fbc23 100644
2043--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2044+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2045@@ -43,7 +43,7 @@ struct samsung_dma_ops {
2046 int (*started)(unsigned ch);
2047 int (*flush)(unsigned ch);
2048 int (*stop)(unsigned ch);
2049-};
2050+} __no_const;
2051
2052 extern void *samsung_dmadev_get_ops(void);
2053 extern void *s3c_dma_get_ops(void);
2054diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2055index 5f28cae..3d23723 100644
2056--- a/arch/arm/plat-samsung/include/plat/ehci.h
2057+++ b/arch/arm/plat-samsung/include/plat/ehci.h
2058@@ -14,7 +14,7 @@
2059 struct s5p_ehci_platdata {
2060 int (*phy_init)(struct platform_device *pdev, int type);
2061 int (*phy_exit)(struct platform_device *pdev, int type);
2062-};
2063+} __no_const;
2064
2065 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2066
2067diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2068index c3a58a1..78fbf54 100644
2069--- a/arch/avr32/include/asm/cache.h
2070+++ b/arch/avr32/include/asm/cache.h
2071@@ -1,8 +1,10 @@
2072 #ifndef __ASM_AVR32_CACHE_H
2073 #define __ASM_AVR32_CACHE_H
2074
2075+#include <linux/const.h>
2076+
2077 #define L1_CACHE_SHIFT 5
2078-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2079+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2080
2081 /*
2082 * Memory returned by kmalloc() may be used for DMA, so we must make
2083diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2084index 3b3159b..425ea94 100644
2085--- a/arch/avr32/include/asm/elf.h
2086+++ b/arch/avr32/include/asm/elf.h
2087@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2088 the loader. We need to make sure that it is out of the way of the program
2089 that it will "exec", and that there is sufficient room for the brk. */
2090
2091-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2092+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2093
2094+#ifdef CONFIG_PAX_ASLR
2095+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2096+
2097+#define PAX_DELTA_MMAP_LEN 15
2098+#define PAX_DELTA_STACK_LEN 15
2099+#endif
2100
2101 /* This yields a mask that user programs can use to figure out what
2102 instruction set this CPU supports. This could be done in user space,
2103diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2104index b7f5c68..556135c 100644
2105--- a/arch/avr32/include/asm/kmap_types.h
2106+++ b/arch/avr32/include/asm/kmap_types.h
2107@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2108 D(11) KM_IRQ1,
2109 D(12) KM_SOFTIRQ0,
2110 D(13) KM_SOFTIRQ1,
2111-D(14) KM_TYPE_NR
2112+D(14) KM_CLEARPAGE,
2113+D(15) KM_TYPE_NR
2114 };
2115
2116 #undef D
2117diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2118index f7040a1..db9f300 100644
2119--- a/arch/avr32/mm/fault.c
2120+++ b/arch/avr32/mm/fault.c
2121@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2122
2123 int exception_trace = 1;
2124
2125+#ifdef CONFIG_PAX_PAGEEXEC
2126+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2127+{
2128+ unsigned long i;
2129+
2130+ printk(KERN_ERR "PAX: bytes at PC: ");
2131+ for (i = 0; i < 20; i++) {
2132+ unsigned char c;
2133+ if (get_user(c, (unsigned char *)pc+i))
2134+ printk(KERN_CONT "???????? ");
2135+ else
2136+ printk(KERN_CONT "%02x ", c);
2137+ }
2138+ printk("\n");
2139+}
2140+#endif
2141+
2142 /*
2143 * This routine handles page faults. It determines the address and the
2144 * problem, and then passes it off to one of the appropriate routines.
2145@@ -156,6 +173,16 @@ bad_area:
2146 up_read(&mm->mmap_sem);
2147
2148 if (user_mode(regs)) {
2149+
2150+#ifdef CONFIG_PAX_PAGEEXEC
2151+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2152+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2153+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2154+ do_group_exit(SIGKILL);
2155+ }
2156+ }
2157+#endif
2158+
2159 if (exception_trace && printk_ratelimit())
2160 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2161 "sp %08lx ecr %lu\n",
2162diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2163index 568885a..f8008df 100644
2164--- a/arch/blackfin/include/asm/cache.h
2165+++ b/arch/blackfin/include/asm/cache.h
2166@@ -7,6 +7,7 @@
2167 #ifndef __ARCH_BLACKFIN_CACHE_H
2168 #define __ARCH_BLACKFIN_CACHE_H
2169
2170+#include <linux/const.h>
2171 #include <linux/linkage.h> /* for asmlinkage */
2172
2173 /*
2174@@ -14,7 +15,7 @@
2175 * Blackfin loads 32 bytes for cache
2176 */
2177 #define L1_CACHE_SHIFT 5
2178-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2179+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2180 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2181
2182 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2183diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2184index aea2718..3639a60 100644
2185--- a/arch/cris/include/arch-v10/arch/cache.h
2186+++ b/arch/cris/include/arch-v10/arch/cache.h
2187@@ -1,8 +1,9 @@
2188 #ifndef _ASM_ARCH_CACHE_H
2189 #define _ASM_ARCH_CACHE_H
2190
2191+#include <linux/const.h>
2192 /* Etrax 100LX have 32-byte cache-lines. */
2193-#define L1_CACHE_BYTES 32
2194 #define L1_CACHE_SHIFT 5
2195+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2196
2197 #endif /* _ASM_ARCH_CACHE_H */
2198diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2199index 1de779f..336fad3 100644
2200--- a/arch/cris/include/arch-v32/arch/cache.h
2201+++ b/arch/cris/include/arch-v32/arch/cache.h
2202@@ -1,11 +1,12 @@
2203 #ifndef _ASM_CRIS_ARCH_CACHE_H
2204 #define _ASM_CRIS_ARCH_CACHE_H
2205
2206+#include <linux/const.h>
2207 #include <arch/hwregs/dma.h>
2208
2209 /* A cache-line is 32 bytes. */
2210-#define L1_CACHE_BYTES 32
2211 #define L1_CACHE_SHIFT 5
2212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2213
2214 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2215
2216diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2217index b86329d..6709906 100644
2218--- a/arch/frv/include/asm/atomic.h
2219+++ b/arch/frv/include/asm/atomic.h
2220@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2221 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2222 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2223
2224+#define atomic64_read_unchecked(v) atomic64_read(v)
2225+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2226+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2227+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2228+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2229+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2230+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2231+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2232+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2233+
2234 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2235 {
2236 int c, old;
2237diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2238index 2797163..c2a401d 100644
2239--- a/arch/frv/include/asm/cache.h
2240+++ b/arch/frv/include/asm/cache.h
2241@@ -12,10 +12,11 @@
2242 #ifndef __ASM_CACHE_H
2243 #define __ASM_CACHE_H
2244
2245+#include <linux/const.h>
2246
2247 /* bytes per L1 cache line */
2248 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2249-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2250+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2251
2252 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2253 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2254diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2255index f8e16b2..c73ff79 100644
2256--- a/arch/frv/include/asm/kmap_types.h
2257+++ b/arch/frv/include/asm/kmap_types.h
2258@@ -23,6 +23,7 @@ enum km_type {
2259 KM_IRQ1,
2260 KM_SOFTIRQ0,
2261 KM_SOFTIRQ1,
2262+ KM_CLEARPAGE,
2263 KM_TYPE_NR
2264 };
2265
2266diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2267index 385fd30..6c3d97e 100644
2268--- a/arch/frv/mm/elf-fdpic.c
2269+++ b/arch/frv/mm/elf-fdpic.c
2270@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2271 if (addr) {
2272 addr = PAGE_ALIGN(addr);
2273 vma = find_vma(current->mm, addr);
2274- if (TASK_SIZE - len >= addr &&
2275- (!vma || addr + len <= vma->vm_start))
2276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2277 goto success;
2278 }
2279
2280@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2281 for (; vma; vma = vma->vm_next) {
2282 if (addr > limit)
2283 break;
2284- if (addr + len <= vma->vm_start)
2285+ if (check_heap_stack_gap(vma, addr, len))
2286 goto success;
2287 addr = vma->vm_end;
2288 }
2289@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2290 for (; vma; vma = vma->vm_next) {
2291 if (addr > limit)
2292 break;
2293- if (addr + len <= vma->vm_start)
2294+ if (check_heap_stack_gap(vma, addr, len))
2295 goto success;
2296 addr = vma->vm_end;
2297 }
2298diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2299index c635028..6d9445a 100644
2300--- a/arch/h8300/include/asm/cache.h
2301+++ b/arch/h8300/include/asm/cache.h
2302@@ -1,8 +1,10 @@
2303 #ifndef __ARCH_H8300_CACHE_H
2304 #define __ARCH_H8300_CACHE_H
2305
2306+#include <linux/const.h>
2307+
2308 /* bytes per L1 cache line */
2309-#define L1_CACHE_BYTES 4
2310+#define L1_CACHE_BYTES _AC(4,UL)
2311
2312 /* m68k-elf-gcc 2.95.2 doesn't like these */
2313
2314diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2315index 0f01de2..d37d309 100644
2316--- a/arch/hexagon/include/asm/cache.h
2317+++ b/arch/hexagon/include/asm/cache.h
2318@@ -21,9 +21,11 @@
2319 #ifndef __ASM_CACHE_H
2320 #define __ASM_CACHE_H
2321
2322+#include <linux/const.h>
2323+
2324 /* Bytes per L1 cache line */
2325-#define L1_CACHE_SHIFT (5)
2326-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2327+#define L1_CACHE_SHIFT 5
2328+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2329
2330 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2331 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2332diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2333index 7d91166..88ab87e 100644
2334--- a/arch/ia64/include/asm/atomic.h
2335+++ b/arch/ia64/include/asm/atomic.h
2336@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2337 #define atomic64_inc(v) atomic64_add(1, (v))
2338 #define atomic64_dec(v) atomic64_sub(1, (v))
2339
2340+#define atomic64_read_unchecked(v) atomic64_read(v)
2341+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2342+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2343+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2344+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2345+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2346+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2347+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2348+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2349+
2350 /* Atomic operations are already serializing */
2351 #define smp_mb__before_atomic_dec() barrier()
2352 #define smp_mb__after_atomic_dec() barrier()
2353diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2354index 988254a..e1ee885 100644
2355--- a/arch/ia64/include/asm/cache.h
2356+++ b/arch/ia64/include/asm/cache.h
2357@@ -1,6 +1,7 @@
2358 #ifndef _ASM_IA64_CACHE_H
2359 #define _ASM_IA64_CACHE_H
2360
2361+#include <linux/const.h>
2362
2363 /*
2364 * Copyright (C) 1998-2000 Hewlett-Packard Co
2365@@ -9,7 +10,7 @@
2366
2367 /* Bytes per L1 (data) cache line. */
2368 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2369-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2370+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2371
2372 #ifdef CONFIG_SMP
2373 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2374diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2375index b5298eb..67c6e62 100644
2376--- a/arch/ia64/include/asm/elf.h
2377+++ b/arch/ia64/include/asm/elf.h
2378@@ -42,6 +42,13 @@
2379 */
2380 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2381
2382+#ifdef CONFIG_PAX_ASLR
2383+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2384+
2385+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2386+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2387+#endif
2388+
2389 #define PT_IA_64_UNWIND 0x70000001
2390
2391 /* IA-64 relocations: */
2392diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2393index 96a8d92..617a1cf 100644
2394--- a/arch/ia64/include/asm/pgalloc.h
2395+++ b/arch/ia64/include/asm/pgalloc.h
2396@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2397 pgd_val(*pgd_entry) = __pa(pud);
2398 }
2399
2400+static inline void
2401+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2402+{
2403+ pgd_populate(mm, pgd_entry, pud);
2404+}
2405+
2406 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2407 {
2408 return quicklist_alloc(0, GFP_KERNEL, NULL);
2409@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2410 pud_val(*pud_entry) = __pa(pmd);
2411 }
2412
2413+static inline void
2414+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2415+{
2416+ pud_populate(mm, pud_entry, pmd);
2417+}
2418+
2419 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2420 {
2421 return quicklist_alloc(0, GFP_KERNEL, NULL);
2422diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2423index 815810c..d60bd4c 100644
2424--- a/arch/ia64/include/asm/pgtable.h
2425+++ b/arch/ia64/include/asm/pgtable.h
2426@@ -12,7 +12,7 @@
2427 * David Mosberger-Tang <davidm@hpl.hp.com>
2428 */
2429
2430-
2431+#include <linux/const.h>
2432 #include <asm/mman.h>
2433 #include <asm/page.h>
2434 #include <asm/processor.h>
2435@@ -142,6 +142,17 @@
2436 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2437 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2438 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2439+
2440+#ifdef CONFIG_PAX_PAGEEXEC
2441+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2442+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2443+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2444+#else
2445+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2446+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2447+# define PAGE_COPY_NOEXEC PAGE_COPY
2448+#endif
2449+
2450 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2451 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2452 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2453diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2454index 54ff557..70c88b7 100644
2455--- a/arch/ia64/include/asm/spinlock.h
2456+++ b/arch/ia64/include/asm/spinlock.h
2457@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2458 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2459
2460 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2461- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2462+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2463 }
2464
2465 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2466diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2467index 449c8c0..432a3d2 100644
2468--- a/arch/ia64/include/asm/uaccess.h
2469+++ b/arch/ia64/include/asm/uaccess.h
2470@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2471 const void *__cu_from = (from); \
2472 long __cu_len = (n); \
2473 \
2474- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2475+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2476 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2477 __cu_len; \
2478 })
2479@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2480 long __cu_len = (n); \
2481 \
2482 __chk_user_ptr(__cu_from); \
2483- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2484+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2485 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2486 __cu_len; \
2487 })
2488diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2489index 24603be..948052d 100644
2490--- a/arch/ia64/kernel/module.c
2491+++ b/arch/ia64/kernel/module.c
2492@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2493 void
2494 module_free (struct module *mod, void *module_region)
2495 {
2496- if (mod && mod->arch.init_unw_table &&
2497- module_region == mod->module_init) {
2498+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2499 unw_remove_unwind_table(mod->arch.init_unw_table);
2500 mod->arch.init_unw_table = NULL;
2501 }
2502@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2503 }
2504
2505 static inline int
2506+in_init_rx (const struct module *mod, uint64_t addr)
2507+{
2508+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2509+}
2510+
2511+static inline int
2512+in_init_rw (const struct module *mod, uint64_t addr)
2513+{
2514+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2515+}
2516+
2517+static inline int
2518 in_init (const struct module *mod, uint64_t addr)
2519 {
2520- return addr - (uint64_t) mod->module_init < mod->init_size;
2521+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2522+}
2523+
2524+static inline int
2525+in_core_rx (const struct module *mod, uint64_t addr)
2526+{
2527+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2528+}
2529+
2530+static inline int
2531+in_core_rw (const struct module *mod, uint64_t addr)
2532+{
2533+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2534 }
2535
2536 static inline int
2537 in_core (const struct module *mod, uint64_t addr)
2538 {
2539- return addr - (uint64_t) mod->module_core < mod->core_size;
2540+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2541 }
2542
2543 static inline int
2544@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2545 break;
2546
2547 case RV_BDREL:
2548- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2549+ if (in_init_rx(mod, val))
2550+ val -= (uint64_t) mod->module_init_rx;
2551+ else if (in_init_rw(mod, val))
2552+ val -= (uint64_t) mod->module_init_rw;
2553+ else if (in_core_rx(mod, val))
2554+ val -= (uint64_t) mod->module_core_rx;
2555+ else if (in_core_rw(mod, val))
2556+ val -= (uint64_t) mod->module_core_rw;
2557 break;
2558
2559 case RV_LTV:
2560@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2561 * addresses have been selected...
2562 */
2563 uint64_t gp;
2564- if (mod->core_size > MAX_LTOFF)
2565+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2566 /*
2567 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2568 * at the end of the module.
2569 */
2570- gp = mod->core_size - MAX_LTOFF / 2;
2571+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2572 else
2573- gp = mod->core_size / 2;
2574- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2575+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2576+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2577 mod->arch.gp = gp;
2578 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2579 }
2580diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2581index 609d500..7dde2a8 100644
2582--- a/arch/ia64/kernel/sys_ia64.c
2583+++ b/arch/ia64/kernel/sys_ia64.c
2584@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2585 if (REGION_NUMBER(addr) == RGN_HPAGE)
2586 addr = 0;
2587 #endif
2588+
2589+#ifdef CONFIG_PAX_RANDMMAP
2590+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2591+ addr = mm->free_area_cache;
2592+ else
2593+#endif
2594+
2595 if (!addr)
2596 addr = mm->free_area_cache;
2597
2598@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2599 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2600 /* At this point: (!vma || addr < vma->vm_end). */
2601 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2602- if (start_addr != TASK_UNMAPPED_BASE) {
2603+ if (start_addr != mm->mmap_base) {
2604 /* Start a new search --- just in case we missed some holes. */
2605- addr = TASK_UNMAPPED_BASE;
2606+ addr = mm->mmap_base;
2607 goto full_search;
2608 }
2609 return -ENOMEM;
2610 }
2611- if (!vma || addr + len <= vma->vm_start) {
2612+ if (check_heap_stack_gap(vma, addr, len)) {
2613 /* Remember the address where we stopped this search: */
2614 mm->free_area_cache = addr + len;
2615 return addr;
2616diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2617index 0ccb28f..8992469 100644
2618--- a/arch/ia64/kernel/vmlinux.lds.S
2619+++ b/arch/ia64/kernel/vmlinux.lds.S
2620@@ -198,7 +198,7 @@ SECTIONS {
2621 /* Per-cpu data: */
2622 . = ALIGN(PERCPU_PAGE_SIZE);
2623 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2624- __phys_per_cpu_start = __per_cpu_load;
2625+ __phys_per_cpu_start = per_cpu_load;
2626 /*
2627 * ensure percpu data fits
2628 * into percpu page size
2629diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2630index 02d29c2..ea893df 100644
2631--- a/arch/ia64/mm/fault.c
2632+++ b/arch/ia64/mm/fault.c
2633@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2634 return pte_present(pte);
2635 }
2636
2637+#ifdef CONFIG_PAX_PAGEEXEC
2638+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2639+{
2640+ unsigned long i;
2641+
2642+ printk(KERN_ERR "PAX: bytes at PC: ");
2643+ for (i = 0; i < 8; i++) {
2644+ unsigned int c;
2645+ if (get_user(c, (unsigned int *)pc+i))
2646+ printk(KERN_CONT "???????? ");
2647+ else
2648+ printk(KERN_CONT "%08x ", c);
2649+ }
2650+ printk("\n");
2651+}
2652+#endif
2653+
2654 void __kprobes
2655 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2656 {
2657@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2658 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2659 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2660
2661- if ((vma->vm_flags & mask) != mask)
2662+ if ((vma->vm_flags & mask) != mask) {
2663+
2664+#ifdef CONFIG_PAX_PAGEEXEC
2665+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2666+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2667+ goto bad_area;
2668+
2669+ up_read(&mm->mmap_sem);
2670+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2671+ do_group_exit(SIGKILL);
2672+ }
2673+#endif
2674+
2675 goto bad_area;
2676
2677+ }
2678+
2679 /*
2680 * If for any reason at all we couldn't handle the fault, make
2681 * sure we exit gracefully rather than endlessly redo the
2682diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2683index 5ca674b..e0e1b70 100644
2684--- a/arch/ia64/mm/hugetlbpage.c
2685+++ b/arch/ia64/mm/hugetlbpage.c
2686@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2687 /* At this point: (!vmm || addr < vmm->vm_end). */
2688 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2689 return -ENOMEM;
2690- if (!vmm || (addr + len) <= vmm->vm_start)
2691+ if (check_heap_stack_gap(vmm, addr, len))
2692 return addr;
2693 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2694 }
2695diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2696index 0eab454..bd794f2 100644
2697--- a/arch/ia64/mm/init.c
2698+++ b/arch/ia64/mm/init.c
2699@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2700 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2701 vma->vm_end = vma->vm_start + PAGE_SIZE;
2702 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2703+
2704+#ifdef CONFIG_PAX_PAGEEXEC
2705+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2706+ vma->vm_flags &= ~VM_EXEC;
2707+
2708+#ifdef CONFIG_PAX_MPROTECT
2709+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2710+ vma->vm_flags &= ~VM_MAYEXEC;
2711+#endif
2712+
2713+ }
2714+#endif
2715+
2716 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2717 down_write(&current->mm->mmap_sem);
2718 if (insert_vm_struct(current->mm, vma)) {
2719diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2720index 40b3ee9..8c2c112 100644
2721--- a/arch/m32r/include/asm/cache.h
2722+++ b/arch/m32r/include/asm/cache.h
2723@@ -1,8 +1,10 @@
2724 #ifndef _ASM_M32R_CACHE_H
2725 #define _ASM_M32R_CACHE_H
2726
2727+#include <linux/const.h>
2728+
2729 /* L1 cache line size */
2730 #define L1_CACHE_SHIFT 4
2731-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2732+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2733
2734 #endif /* _ASM_M32R_CACHE_H */
2735diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2736index 82abd15..d95ae5d 100644
2737--- a/arch/m32r/lib/usercopy.c
2738+++ b/arch/m32r/lib/usercopy.c
2739@@ -14,6 +14,9 @@
2740 unsigned long
2741 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2742 {
2743+ if ((long)n < 0)
2744+ return n;
2745+
2746 prefetch(from);
2747 if (access_ok(VERIFY_WRITE, to, n))
2748 __copy_user(to,from,n);
2749@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2750 unsigned long
2751 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2752 {
2753+ if ((long)n < 0)
2754+ return n;
2755+
2756 prefetchw(to);
2757 if (access_ok(VERIFY_READ, from, n))
2758 __copy_user_zeroing(to,from,n);
2759diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2760index 0395c51..5f26031 100644
2761--- a/arch/m68k/include/asm/cache.h
2762+++ b/arch/m68k/include/asm/cache.h
2763@@ -4,9 +4,11 @@
2764 #ifndef __ARCH_M68K_CACHE_H
2765 #define __ARCH_M68K_CACHE_H
2766
2767+#include <linux/const.h>
2768+
2769 /* bytes per L1 cache line */
2770 #define L1_CACHE_SHIFT 4
2771-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2772+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2773
2774 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2775
2776diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2777index 4efe96a..60e8699 100644
2778--- a/arch/microblaze/include/asm/cache.h
2779+++ b/arch/microblaze/include/asm/cache.h
2780@@ -13,11 +13,12 @@
2781 #ifndef _ASM_MICROBLAZE_CACHE_H
2782 #define _ASM_MICROBLAZE_CACHE_H
2783
2784+#include <linux/const.h>
2785 #include <asm/registers.h>
2786
2787 #define L1_CACHE_SHIFT 5
2788 /* word-granular cache in microblaze */
2789-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2790+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2791
2792 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2793
2794diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2795index 3f4c5cb..3439c6e 100644
2796--- a/arch/mips/include/asm/atomic.h
2797+++ b/arch/mips/include/asm/atomic.h
2798@@ -21,6 +21,10 @@
2799 #include <asm/cmpxchg.h>
2800 #include <asm/war.h>
2801
2802+#ifdef CONFIG_GENERIC_ATOMIC64
2803+#include <asm-generic/atomic64.h>
2804+#endif
2805+
2806 #define ATOMIC_INIT(i) { (i) }
2807
2808 /*
2809@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2810 */
2811 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2812
2813+#define atomic64_read_unchecked(v) atomic64_read(v)
2814+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2815+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2816+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2817+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2818+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2819+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2820+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2821+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2822+
2823 #endif /* CONFIG_64BIT */
2824
2825 /*
2826diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2827index b4db69f..8f3b093 100644
2828--- a/arch/mips/include/asm/cache.h
2829+++ b/arch/mips/include/asm/cache.h
2830@@ -9,10 +9,11 @@
2831 #ifndef _ASM_CACHE_H
2832 #define _ASM_CACHE_H
2833
2834+#include <linux/const.h>
2835 #include <kmalloc.h>
2836
2837 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2838-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2839+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2840
2841 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2842 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2843diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2844index 455c0ac..ad65fbe 100644
2845--- a/arch/mips/include/asm/elf.h
2846+++ b/arch/mips/include/asm/elf.h
2847@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2848 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2849 #endif
2850
2851+#ifdef CONFIG_PAX_ASLR
2852+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2853+
2854+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2855+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2856+#endif
2857+
2858 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2859 struct linux_binprm;
2860 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2861 int uses_interp);
2862
2863-struct mm_struct;
2864-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2865-#define arch_randomize_brk arch_randomize_brk
2866-
2867 #endif /* _ASM_ELF_H */
2868diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2869index c1f6afa..38cc6e9 100644
2870--- a/arch/mips/include/asm/exec.h
2871+++ b/arch/mips/include/asm/exec.h
2872@@ -12,6 +12,6 @@
2873 #ifndef _ASM_EXEC_H
2874 #define _ASM_EXEC_H
2875
2876-extern unsigned long arch_align_stack(unsigned long sp);
2877+#define arch_align_stack(x) ((x) & ~0xfUL)
2878
2879 #endif /* _ASM_EXEC_H */
2880diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2881index da9bd7d..91aa7ab 100644
2882--- a/arch/mips/include/asm/page.h
2883+++ b/arch/mips/include/asm/page.h
2884@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2885 #ifdef CONFIG_CPU_MIPS32
2886 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2887 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2888- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2889+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2890 #else
2891 typedef struct { unsigned long long pte; } pte_t;
2892 #define pte_val(x) ((x).pte)
2893diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2894index 881d18b..cea38bc 100644
2895--- a/arch/mips/include/asm/pgalloc.h
2896+++ b/arch/mips/include/asm/pgalloc.h
2897@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2898 {
2899 set_pud(pud, __pud((unsigned long)pmd));
2900 }
2901+
2902+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2903+{
2904+ pud_populate(mm, pud, pmd);
2905+}
2906 #endif
2907
2908 /*
2909diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2910index 0d85d8e..ec71487 100644
2911--- a/arch/mips/include/asm/thread_info.h
2912+++ b/arch/mips/include/asm/thread_info.h
2913@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2914 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2915 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2916 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2917+/* li takes a 32bit immediate */
2918+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2919 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2920
2921 #ifdef CONFIG_MIPS32_O32
2922@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2923 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2924 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2925 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2926+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2927+
2928+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do in syscall_trace_leave() */
2931-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2932+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2933
2934 /* work to do on interrupt/exception return */
2935 #define _TIF_WORK_MASK (0x0000ffef & \
2936 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2937 /* work to do on any return to u-space */
2938-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2939+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2940
2941 #endif /* __KERNEL__ */
2942
2943diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2944index 9fdd8bc..4bd7f1a 100644
2945--- a/arch/mips/kernel/binfmt_elfn32.c
2946+++ b/arch/mips/kernel/binfmt_elfn32.c
2947@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2948 #undef ELF_ET_DYN_BASE
2949 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2950
2951+#ifdef CONFIG_PAX_ASLR
2952+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2953+
2954+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2955+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2956+#endif
2957+
2958 #include <asm/processor.h>
2959 #include <linux/module.h>
2960 #include <linux/elfcore.h>
2961diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2962index ff44823..97f8906 100644
2963--- a/arch/mips/kernel/binfmt_elfo32.c
2964+++ b/arch/mips/kernel/binfmt_elfo32.c
2965@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2966 #undef ELF_ET_DYN_BASE
2967 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2968
2969+#ifdef CONFIG_PAX_ASLR
2970+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2971+
2972+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2973+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2974+#endif
2975+
2976 #include <asm/processor.h>
2977
2978 /*
2979diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2980index e9a5fd7..378809a 100644
2981--- a/arch/mips/kernel/process.c
2982+++ b/arch/mips/kernel/process.c
2983@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2984 out:
2985 return pc;
2986 }
2987-
2988-/*
2989- * Don't forget that the stack pointer must be aligned on a 8 bytes
2990- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2991- */
2992-unsigned long arch_align_stack(unsigned long sp)
2993-{
2994- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2995- sp -= get_random_int() & ~PAGE_MASK;
2996-
2997- return sp & ALMASK;
2998-}
2999diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3000index 7c24c29..e2f1981 100644
3001--- a/arch/mips/kernel/ptrace.c
3002+++ b/arch/mips/kernel/ptrace.c
3003@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3004 return arch;
3005 }
3006
3007+#ifdef CONFIG_GRKERNSEC_SETXID
3008+extern void gr_delayed_cred_worker(void);
3009+#endif
3010+
3011 /*
3012 * Notification of system call entry/exit
3013 * - triggered by current->work.syscall_trace
3014@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3015 /* do the secure computing check first */
3016 secure_computing(regs->regs[2]);
3017
3018+#ifdef CONFIG_GRKERNSEC_SETXID
3019+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020+ gr_delayed_cred_worker();
3021+#endif
3022+
3023 if (!(current->ptrace & PT_PTRACED))
3024 goto out;
3025
3026diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3027index a632bc1..0b77c7c 100644
3028--- a/arch/mips/kernel/scall32-o32.S
3029+++ b/arch/mips/kernel/scall32-o32.S
3030@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3031
3032 stack_done:
3033 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3034- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3035+ li t1, _TIF_SYSCALL_WORK
3036 and t0, t1
3037 bnez t0, syscall_trace_entry # -> yes
3038
3039diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3040index 3b5a5e9..e1ee86d 100644
3041--- a/arch/mips/kernel/scall64-64.S
3042+++ b/arch/mips/kernel/scall64-64.S
3043@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3044
3045 sd a3, PT_R26(sp) # save a3 for syscall restarting
3046
3047- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3048+ li t1, _TIF_SYSCALL_WORK
3049 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3050 and t0, t1, t0
3051 bnez t0, syscall_trace_entry
3052diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3053index 6be6f70..1859577 100644
3054--- a/arch/mips/kernel/scall64-n32.S
3055+++ b/arch/mips/kernel/scall64-n32.S
3056@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3057
3058 sd a3, PT_R26(sp) # save a3 for syscall restarting
3059
3060- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3061+ li t1, _TIF_SYSCALL_WORK
3062 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3063 and t0, t1, t0
3064 bnez t0, n32_syscall_trace_entry
3065diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3066index 5422855..74e63a3 100644
3067--- a/arch/mips/kernel/scall64-o32.S
3068+++ b/arch/mips/kernel/scall64-o32.S
3069@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3070 PTR 4b, bad_stack
3071 .previous
3072
3073- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3074+ li t1, _TIF_SYSCALL_WORK
3075 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3076 and t0, t1, t0
3077 bnez t0, trace_a_syscall
3078diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3079index c14f6df..537e729 100644
3080--- a/arch/mips/mm/fault.c
3081+++ b/arch/mips/mm/fault.c
3082@@ -27,6 +27,23 @@
3083 #include <asm/highmem.h> /* For VMALLOC_END */
3084 #include <linux/kdebug.h>
3085
3086+#ifdef CONFIG_PAX_PAGEEXEC
3087+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3088+{
3089+ unsigned long i;
3090+
3091+ printk(KERN_ERR "PAX: bytes at PC: ");
3092+ for (i = 0; i < 5; i++) {
3093+ unsigned int c;
3094+ if (get_user(c, (unsigned int *)pc+i))
3095+ printk(KERN_CONT "???????? ");
3096+ else
3097+ printk(KERN_CONT "%08x ", c);
3098+ }
3099+ printk("\n");
3100+}
3101+#endif
3102+
3103 /*
3104 * This routine handles page faults. It determines the address,
3105 * and the problem, and then passes it off to one of the appropriate
3106diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3107index 302d779..7d35bf8 100644
3108--- a/arch/mips/mm/mmap.c
3109+++ b/arch/mips/mm/mmap.c
3110@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 do_color_align = 1;
3112
3113 /* requesting a specific address */
3114+
3115+#ifdef CONFIG_PAX_RANDMMAP
3116+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3117+#endif
3118+
3119 if (addr) {
3120 if (do_color_align)
3121 addr = COLOUR_ALIGN(addr, pgoff);
3122@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3123 addr = PAGE_ALIGN(addr);
3124
3125 vma = find_vma(mm, addr);
3126- if (TASK_SIZE - len >= addr &&
3127- (!vma || addr + len <= vma->vm_start))
3128+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3129 return addr;
3130 }
3131
3132@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3133 /* At this point: (!vma || addr < vma->vm_end). */
3134 if (TASK_SIZE - len < addr)
3135 return -ENOMEM;
3136- if (!vma || addr + len <= vma->vm_start)
3137+ if (check_heap_stack_gap(vmm, addr, len))
3138 return addr;
3139 addr = vma->vm_end;
3140 if (do_color_align)
3141@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3142 /* make sure it can fit in the remaining address space */
3143 if (likely(addr > len)) {
3144 vma = find_vma(mm, addr - len);
3145- if (!vma || addr <= vma->vm_start) {
3146+ if (check_heap_stack_gap(vmm, addr - len, len))
3147 /* cache the address as a hint for next time */
3148 return mm->free_area_cache = addr - len;
3149 }
3150@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3151 * return with success:
3152 */
3153 vma = find_vma(mm, addr);
3154- if (likely(!vma || addr + len <= vma->vm_start)) {
3155+ if (check_heap_stack_gap(vmm, addr, len)) {
3156 /* cache the address as a hint for next time */
3157 return mm->free_area_cache = addr;
3158 }
3159@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3160 mm->unmap_area = arch_unmap_area_topdown;
3161 }
3162 }
3163-
3164-static inline unsigned long brk_rnd(void)
3165-{
3166- unsigned long rnd = get_random_int();
3167-
3168- rnd = rnd << PAGE_SHIFT;
3169- /* 8MB for 32bit, 256MB for 64bit */
3170- if (TASK_IS_32BIT_ADDR)
3171- rnd = rnd & 0x7ffffful;
3172- else
3173- rnd = rnd & 0xffffffful;
3174-
3175- return rnd;
3176-}
3177-
3178-unsigned long arch_randomize_brk(struct mm_struct *mm)
3179-{
3180- unsigned long base = mm->brk;
3181- unsigned long ret;
3182-
3183- ret = PAGE_ALIGN(base + brk_rnd());
3184-
3185- if (ret < mm->brk)
3186- return mm->brk;
3187-
3188- return ret;
3189-}
3190diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3191index 967d144..db12197 100644
3192--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3193+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3194@@ -11,12 +11,14 @@
3195 #ifndef _ASM_PROC_CACHE_H
3196 #define _ASM_PROC_CACHE_H
3197
3198+#include <linux/const.h>
3199+
3200 /* L1 cache */
3201
3202 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3203 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3204-#define L1_CACHE_BYTES 16 /* bytes per entry */
3205 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3206+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3207 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3208
3209 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3210diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3211index bcb5df2..84fabd2 100644
3212--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3213+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3214@@ -16,13 +16,15 @@
3215 #ifndef _ASM_PROC_CACHE_H
3216 #define _ASM_PROC_CACHE_H
3217
3218+#include <linux/const.h>
3219+
3220 /*
3221 * L1 cache
3222 */
3223 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3224 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3225-#define L1_CACHE_BYTES 32 /* bytes per entry */
3226 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3227+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3228 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3229
3230 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3231diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3232index 4ce7a01..449202a 100644
3233--- a/arch/openrisc/include/asm/cache.h
3234+++ b/arch/openrisc/include/asm/cache.h
3235@@ -19,11 +19,13 @@
3236 #ifndef __ASM_OPENRISC_CACHE_H
3237 #define __ASM_OPENRISC_CACHE_H
3238
3239+#include <linux/const.h>
3240+
3241 /* FIXME: How can we replace these with values from the CPU...
3242 * they shouldn't be hard-coded!
3243 */
3244
3245-#define L1_CACHE_BYTES 16
3246 #define L1_CACHE_SHIFT 4
3247+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3248
3249 #endif /* __ASM_OPENRISC_CACHE_H */
3250diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3251index 6c6defc..d30653d 100644
3252--- a/arch/parisc/include/asm/atomic.h
3253+++ b/arch/parisc/include/asm/atomic.h
3254@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3255
3256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3257
3258+#define atomic64_read_unchecked(v) atomic64_read(v)
3259+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3260+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3261+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3262+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3263+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3264+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3265+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3266+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3267+
3268 #endif /* !CONFIG_64BIT */
3269
3270
3271diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3272index 47f11c7..3420df2 100644
3273--- a/arch/parisc/include/asm/cache.h
3274+++ b/arch/parisc/include/asm/cache.h
3275@@ -5,6 +5,7 @@
3276 #ifndef __ARCH_PARISC_CACHE_H
3277 #define __ARCH_PARISC_CACHE_H
3278
3279+#include <linux/const.h>
3280
3281 /*
3282 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3283@@ -15,13 +16,13 @@
3284 * just ruin performance.
3285 */
3286 #ifdef CONFIG_PA20
3287-#define L1_CACHE_BYTES 64
3288 #define L1_CACHE_SHIFT 6
3289 #else
3290-#define L1_CACHE_BYTES 32
3291 #define L1_CACHE_SHIFT 5
3292 #endif
3293
3294+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3295+
3296 #ifndef __ASSEMBLY__
3297
3298 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3299diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3300index 19f6cb1..6c78cf2 100644
3301--- a/arch/parisc/include/asm/elf.h
3302+++ b/arch/parisc/include/asm/elf.h
3303@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3304
3305 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3306
3307+#ifdef CONFIG_PAX_ASLR
3308+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3309+
3310+#define PAX_DELTA_MMAP_LEN 16
3311+#define PAX_DELTA_STACK_LEN 16
3312+#endif
3313+
3314 /* This yields a mask that user programs can use to figure out what
3315 instruction set this CPU supports. This could be done in user space,
3316 but it's not easy, and we've already done it here. */
3317diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3318index fc987a1..6e068ef 100644
3319--- a/arch/parisc/include/asm/pgalloc.h
3320+++ b/arch/parisc/include/asm/pgalloc.h
3321@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3323 }
3324
3325+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3326+{
3327+ pgd_populate(mm, pgd, pmd);
3328+}
3329+
3330 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3331 {
3332 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3333@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3334 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3335 #define pmd_free(mm, x) do { } while (0)
3336 #define pgd_populate(mm, pmd, pte) BUG()
3337+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3338
3339 #endif
3340
3341diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3342index ee99f23..802b0a1 100644
3343--- a/arch/parisc/include/asm/pgtable.h
3344+++ b/arch/parisc/include/asm/pgtable.h
3345@@ -212,6 +212,17 @@ struct vm_area_struct;
3346 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3347 #define PAGE_COPY PAGE_EXECREAD
3348 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3349+
3350+#ifdef CONFIG_PAX_PAGEEXEC
3351+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3352+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3353+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3354+#else
3355+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3356+# define PAGE_COPY_NOEXEC PAGE_COPY
3357+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3358+#endif
3359+
3360 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3361 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3362 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3363diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3364index 9ac0660..6ed15c4 100644
3365--- a/arch/parisc/include/asm/uaccess.h
3366+++ b/arch/parisc/include/asm/uaccess.h
3367@@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3368 const void __user *from,
3369 unsigned long n)
3370 {
3371- int sz = __compiletime_object_size(to);
3372+ size_t sz = __compiletime_object_size(to);
3373 int ret = -EFAULT;
3374
3375- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3376+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3377 ret = __copy_from_user(to, from, n);
3378 else
3379 copy_from_user_overflow();
3380diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3381index 5e34ccf..672bc9c 100644
3382--- a/arch/parisc/kernel/module.c
3383+++ b/arch/parisc/kernel/module.c
3384@@ -98,16 +98,38 @@
3385
3386 /* three functions to determine where in the module core
3387 * or init pieces the location is */
3388+static inline int in_init_rx(struct module *me, void *loc)
3389+{
3390+ return (loc >= me->module_init_rx &&
3391+ loc < (me->module_init_rx + me->init_size_rx));
3392+}
3393+
3394+static inline int in_init_rw(struct module *me, void *loc)
3395+{
3396+ return (loc >= me->module_init_rw &&
3397+ loc < (me->module_init_rw + me->init_size_rw));
3398+}
3399+
3400 static inline int in_init(struct module *me, void *loc)
3401 {
3402- return (loc >= me->module_init &&
3403- loc <= (me->module_init + me->init_size));
3404+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3405+}
3406+
3407+static inline int in_core_rx(struct module *me, void *loc)
3408+{
3409+ return (loc >= me->module_core_rx &&
3410+ loc < (me->module_core_rx + me->core_size_rx));
3411+}
3412+
3413+static inline int in_core_rw(struct module *me, void *loc)
3414+{
3415+ return (loc >= me->module_core_rw &&
3416+ loc < (me->module_core_rw + me->core_size_rw));
3417 }
3418
3419 static inline int in_core(struct module *me, void *loc)
3420 {
3421- return (loc >= me->module_core &&
3422- loc <= (me->module_core + me->core_size));
3423+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3424 }
3425
3426 static inline int in_local(struct module *me, void *loc)
3427@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3428 }
3429
3430 /* align things a bit */
3431- me->core_size = ALIGN(me->core_size, 16);
3432- me->arch.got_offset = me->core_size;
3433- me->core_size += gots * sizeof(struct got_entry);
3434+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3435+ me->arch.got_offset = me->core_size_rw;
3436+ me->core_size_rw += gots * sizeof(struct got_entry);
3437
3438- me->core_size = ALIGN(me->core_size, 16);
3439- me->arch.fdesc_offset = me->core_size;
3440- me->core_size += fdescs * sizeof(Elf_Fdesc);
3441+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3442+ me->arch.fdesc_offset = me->core_size_rw;
3443+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3444
3445 me->arch.got_max = gots;
3446 me->arch.fdesc_max = fdescs;
3447@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3448
3449 BUG_ON(value == 0);
3450
3451- got = me->module_core + me->arch.got_offset;
3452+ got = me->module_core_rw + me->arch.got_offset;
3453 for (i = 0; got[i].addr; i++)
3454 if (got[i].addr == value)
3455 goto out;
3456@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3457 #ifdef CONFIG_64BIT
3458 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3459 {
3460- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3461+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3462
3463 if (!value) {
3464 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3465@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3466
3467 /* Create new one */
3468 fdesc->addr = value;
3469- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3470+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3471 return (Elf_Addr)fdesc;
3472 }
3473 #endif /* CONFIG_64BIT */
3474@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3475
3476 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3477 end = table + sechdrs[me->arch.unwind_section].sh_size;
3478- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3479+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3480
3481 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3482 me->arch.unwind_section, table, end, gp);
3483diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3484index c9b9322..02d8940 100644
3485--- a/arch/parisc/kernel/sys_parisc.c
3486+++ b/arch/parisc/kernel/sys_parisc.c
3487@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3488 /* At this point: (!vma || addr < vma->vm_end). */
3489 if (TASK_SIZE - len < addr)
3490 return -ENOMEM;
3491- if (!vma || addr + len <= vma->vm_start)
3492+ if (check_heap_stack_gap(vma, addr, len))
3493 return addr;
3494 addr = vma->vm_end;
3495 }
3496@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3497 /* At this point: (!vma || addr < vma->vm_end). */
3498 if (TASK_SIZE - len < addr)
3499 return -ENOMEM;
3500- if (!vma || addr + len <= vma->vm_start)
3501+ if (check_heap_stack_gap(vma, addr, len))
3502 return addr;
3503 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3504 if (addr < vma->vm_end) /* handle wraparound */
3505@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3506 if (flags & MAP_FIXED)
3507 return addr;
3508 if (!addr)
3509- addr = TASK_UNMAPPED_BASE;
3510+ addr = current->mm->mmap_base;
3511
3512 if (filp) {
3513 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3514diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3515index 45ba99f..8e22c33 100644
3516--- a/arch/parisc/kernel/traps.c
3517+++ b/arch/parisc/kernel/traps.c
3518@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3519
3520 down_read(&current->mm->mmap_sem);
3521 vma = find_vma(current->mm,regs->iaoq[0]);
3522- if (vma && (regs->iaoq[0] >= vma->vm_start)
3523- && (vma->vm_flags & VM_EXEC)) {
3524-
3525+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3526 fault_address = regs->iaoq[0];
3527 fault_space = regs->iasq[0];
3528
3529diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3530index 18162ce..94de376 100644
3531--- a/arch/parisc/mm/fault.c
3532+++ b/arch/parisc/mm/fault.c
3533@@ -15,6 +15,7 @@
3534 #include <linux/sched.h>
3535 #include <linux/interrupt.h>
3536 #include <linux/module.h>
3537+#include <linux/unistd.h>
3538
3539 #include <asm/uaccess.h>
3540 #include <asm/traps.h>
3541@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3542 static unsigned long
3543 parisc_acctyp(unsigned long code, unsigned int inst)
3544 {
3545- if (code == 6 || code == 16)
3546+ if (code == 6 || code == 7 || code == 16)
3547 return VM_EXEC;
3548
3549 switch (inst & 0xf0000000) {
3550@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3551 }
3552 #endif
3553
3554+#ifdef CONFIG_PAX_PAGEEXEC
3555+/*
3556+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3557+ *
3558+ * returns 1 when task should be killed
3559+ * 2 when rt_sigreturn trampoline was detected
3560+ * 3 when unpatched PLT trampoline was detected
3561+ */
3562+static int pax_handle_fetch_fault(struct pt_regs *regs)
3563+{
3564+
3565+#ifdef CONFIG_PAX_EMUPLT
3566+ int err;
3567+
3568+ do { /* PaX: unpatched PLT emulation */
3569+ unsigned int bl, depwi;
3570+
3571+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3572+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3573+
3574+ if (err)
3575+ break;
3576+
3577+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3578+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3579+
3580+ err = get_user(ldw, (unsigned int *)addr);
3581+ err |= get_user(bv, (unsigned int *)(addr+4));
3582+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3583+
3584+ if (err)
3585+ break;
3586+
3587+ if (ldw == 0x0E801096U &&
3588+ bv == 0xEAC0C000U &&
3589+ ldw2 == 0x0E881095U)
3590+ {
3591+ unsigned int resolver, map;
3592+
3593+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3594+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3595+ if (err)
3596+ break;
3597+
3598+ regs->gr[20] = instruction_pointer(regs)+8;
3599+ regs->gr[21] = map;
3600+ regs->gr[22] = resolver;
3601+ regs->iaoq[0] = resolver | 3UL;
3602+ regs->iaoq[1] = regs->iaoq[0] + 4;
3603+ return 3;
3604+ }
3605+ }
3606+ } while (0);
3607+#endif
3608+
3609+#ifdef CONFIG_PAX_EMUTRAMP
3610+
3611+#ifndef CONFIG_PAX_EMUSIGRT
3612+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3613+ return 1;
3614+#endif
3615+
3616+ do { /* PaX: rt_sigreturn emulation */
3617+ unsigned int ldi1, ldi2, bel, nop;
3618+
3619+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3620+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3621+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3622+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3623+
3624+ if (err)
3625+ break;
3626+
3627+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3628+ ldi2 == 0x3414015AU &&
3629+ bel == 0xE4008200U &&
3630+ nop == 0x08000240U)
3631+ {
3632+ regs->gr[25] = (ldi1 & 2) >> 1;
3633+ regs->gr[20] = __NR_rt_sigreturn;
3634+ regs->gr[31] = regs->iaoq[1] + 16;
3635+ regs->sr[0] = regs->iasq[1];
3636+ regs->iaoq[0] = 0x100UL;
3637+ regs->iaoq[1] = regs->iaoq[0] + 4;
3638+ regs->iasq[0] = regs->sr[2];
3639+ regs->iasq[1] = regs->sr[2];
3640+ return 2;
3641+ }
3642+ } while (0);
3643+#endif
3644+
3645+ return 1;
3646+}
3647+
3648+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3649+{
3650+ unsigned long i;
3651+
3652+ printk(KERN_ERR "PAX: bytes at PC: ");
3653+ for (i = 0; i < 5; i++) {
3654+ unsigned int c;
3655+ if (get_user(c, (unsigned int *)pc+i))
3656+ printk(KERN_CONT "???????? ");
3657+ else
3658+ printk(KERN_CONT "%08x ", c);
3659+ }
3660+ printk("\n");
3661+}
3662+#endif
3663+
3664 int fixup_exception(struct pt_regs *regs)
3665 {
3666 const struct exception_table_entry *fix;
3667@@ -192,8 +303,33 @@ good_area:
3668
3669 acc_type = parisc_acctyp(code,regs->iir);
3670
3671- if ((vma->vm_flags & acc_type) != acc_type)
3672+ if ((vma->vm_flags & acc_type) != acc_type) {
3673+
3674+#ifdef CONFIG_PAX_PAGEEXEC
3675+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3676+ (address & ~3UL) == instruction_pointer(regs))
3677+ {
3678+ up_read(&mm->mmap_sem);
3679+ switch (pax_handle_fetch_fault(regs)) {
3680+
3681+#ifdef CONFIG_PAX_EMUPLT
3682+ case 3:
3683+ return;
3684+#endif
3685+
3686+#ifdef CONFIG_PAX_EMUTRAMP
3687+ case 2:
3688+ return;
3689+#endif
3690+
3691+ }
3692+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3693+ do_group_exit(SIGKILL);
3694+ }
3695+#endif
3696+
3697 goto bad_area;
3698+ }
3699
3700 /*
3701 * If for any reason at all we couldn't handle the fault, make
3702diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3703index da29032..f76c24c 100644
3704--- a/arch/powerpc/include/asm/atomic.h
3705+++ b/arch/powerpc/include/asm/atomic.h
3706@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3707 return t1;
3708 }
3709
3710+#define atomic64_read_unchecked(v) atomic64_read(v)
3711+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3712+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3713+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3714+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3715+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3716+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3717+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3718+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3719+
3720 #endif /* __powerpc64__ */
3721
3722 #endif /* __KERNEL__ */
3723diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3724index 9e495c9..b6878e5 100644
3725--- a/arch/powerpc/include/asm/cache.h
3726+++ b/arch/powerpc/include/asm/cache.h
3727@@ -3,6 +3,7 @@
3728
3729 #ifdef __KERNEL__
3730
3731+#include <linux/const.h>
3732
3733 /* bytes per L1 cache line */
3734 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3735@@ -22,7 +23,7 @@
3736 #define L1_CACHE_SHIFT 7
3737 #endif
3738
3739-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3740+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3741
3742 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3743
3744diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3745index 3bf9cca..e7457d0 100644
3746--- a/arch/powerpc/include/asm/elf.h
3747+++ b/arch/powerpc/include/asm/elf.h
3748@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3749 the loader. We need to make sure that it is out of the way of the program
3750 that it will "exec", and that there is sufficient room for the brk. */
3751
3752-extern unsigned long randomize_et_dyn(unsigned long base);
3753-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3754+#define ELF_ET_DYN_BASE (0x20000000)
3755+
3756+#ifdef CONFIG_PAX_ASLR
3757+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3758+
3759+#ifdef __powerpc64__
3760+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3761+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3762+#else
3763+#define PAX_DELTA_MMAP_LEN 15
3764+#define PAX_DELTA_STACK_LEN 15
3765+#endif
3766+#endif
3767
3768 /*
3769 * Our registers are always unsigned longs, whether we're a 32 bit
3770@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3771 (0x7ff >> (PAGE_SHIFT - 12)) : \
3772 (0x3ffff >> (PAGE_SHIFT - 12)))
3773
3774-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3775-#define arch_randomize_brk arch_randomize_brk
3776-
3777 #endif /* __KERNEL__ */
3778
3779 /*
3780diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3781index 8196e9c..d83a9f3 100644
3782--- a/arch/powerpc/include/asm/exec.h
3783+++ b/arch/powerpc/include/asm/exec.h
3784@@ -4,6 +4,6 @@
3785 #ifndef _ASM_POWERPC_EXEC_H
3786 #define _ASM_POWERPC_EXEC_H
3787
3788-extern unsigned long arch_align_stack(unsigned long sp);
3789+#define arch_align_stack(x) ((x) & ~0xfUL)
3790
3791 #endif /* _ASM_POWERPC_EXEC_H */
3792diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3793index bca8fdc..61e9580 100644
3794--- a/arch/powerpc/include/asm/kmap_types.h
3795+++ b/arch/powerpc/include/asm/kmap_types.h
3796@@ -27,6 +27,7 @@ enum km_type {
3797 KM_PPC_SYNC_PAGE,
3798 KM_PPC_SYNC_ICACHE,
3799 KM_KDB,
3800+ KM_CLEARPAGE,
3801 KM_TYPE_NR
3802 };
3803
3804diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3805index d4a7f64..451de1c 100644
3806--- a/arch/powerpc/include/asm/mman.h
3807+++ b/arch/powerpc/include/asm/mman.h
3808@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3809 }
3810 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3811
3812-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3813+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3814 {
3815 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3816 }
3817diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3818index f072e97..b436dee 100644
3819--- a/arch/powerpc/include/asm/page.h
3820+++ b/arch/powerpc/include/asm/page.h
3821@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3822 * and needs to be executable. This means the whole heap ends
3823 * up being executable.
3824 */
3825-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3826- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3827+#define VM_DATA_DEFAULT_FLAGS32 \
3828+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3829+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3830
3831 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3832 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3833@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3834 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3835 #endif
3836
3837+#define ktla_ktva(addr) (addr)
3838+#define ktva_ktla(addr) (addr)
3839+
3840 /*
3841 * Use the top bit of the higher-level page table entries to indicate whether
3842 * the entries we point to contain hugepages. This works because we know that
3843diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3844index fed85e6..da5c71b 100644
3845--- a/arch/powerpc/include/asm/page_64.h
3846+++ b/arch/powerpc/include/asm/page_64.h
3847@@ -146,15 +146,18 @@ do { \
3848 * stack by default, so in the absence of a PT_GNU_STACK program header
3849 * we turn execute permission off.
3850 */
3851-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3852- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3853+#define VM_STACK_DEFAULT_FLAGS32 \
3854+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3855+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3856
3857 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3858 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3859
3860+#ifndef CONFIG_PAX_PAGEEXEC
3861 #define VM_STACK_DEFAULT_FLAGS \
3862 (is_32bit_task() ? \
3863 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3864+#endif
3865
3866 #include <asm-generic/getorder.h>
3867
3868diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3869index 292725c..f87ae14 100644
3870--- a/arch/powerpc/include/asm/pgalloc-64.h
3871+++ b/arch/powerpc/include/asm/pgalloc-64.h
3872@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3873 #ifndef CONFIG_PPC_64K_PAGES
3874
3875 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3876+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3877
3878 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3879 {
3880@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 pud_set(pud, (unsigned long)pmd);
3882 }
3883
3884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3885+{
3886+ pud_populate(mm, pud, pmd);
3887+}
3888+
3889 #define pmd_populate(mm, pmd, pte_page) \
3890 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3891 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3892@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3893 #else /* CONFIG_PPC_64K_PAGES */
3894
3895 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3896+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3897
3898 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3899 pte_t *pte)
3900diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3901index 2e0e411..7899c68 100644
3902--- a/arch/powerpc/include/asm/pgtable.h
3903+++ b/arch/powerpc/include/asm/pgtable.h
3904@@ -2,6 +2,7 @@
3905 #define _ASM_POWERPC_PGTABLE_H
3906 #ifdef __KERNEL__
3907
3908+#include <linux/const.h>
3909 #ifndef __ASSEMBLY__
3910 #include <asm/processor.h> /* For TASK_SIZE */
3911 #include <asm/mmu.h>
3912diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3913index 4aad413..85d86bf 100644
3914--- a/arch/powerpc/include/asm/pte-hash32.h
3915+++ b/arch/powerpc/include/asm/pte-hash32.h
3916@@ -21,6 +21,7 @@
3917 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3918 #define _PAGE_USER 0x004 /* usermode access allowed */
3919 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3920+#define _PAGE_EXEC _PAGE_GUARDED
3921 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3922 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3923 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3924diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3925index 9d7f0fb..a28fe69 100644
3926--- a/arch/powerpc/include/asm/reg.h
3927+++ b/arch/powerpc/include/asm/reg.h
3928@@ -212,6 +212,7 @@
3929 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3930 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3931 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3932+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3933 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3934 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3935 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3936diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3937index 4a741c7..c8162227b 100644
3938--- a/arch/powerpc/include/asm/thread_info.h
3939+++ b/arch/powerpc/include/asm/thread_info.h
3940@@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3941 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3942 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3943 #define TIF_SINGLESTEP 8 /* singlestepping active */
3944-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3945 #define TIF_SECCOMP 10 /* secure computing */
3946 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3947 #define TIF_NOERROR 12 /* Force successful syscall return */
3948 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3949 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3950+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3951+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3952+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3953
3954 /* as above, but as bit values */
3955 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3956@@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3957 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3958 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3959 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3960+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3961+
3962 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3963- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
 3964+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
 3965+ _TIF_GRSEC_SETXID)
3966
3967 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3968 _TIF_NOTIFY_RESUME)
3969diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3970index bd0fb84..a42a14b 100644
3971--- a/arch/powerpc/include/asm/uaccess.h
3972+++ b/arch/powerpc/include/asm/uaccess.h
3973@@ -13,6 +13,8 @@
3974 #define VERIFY_READ 0
3975 #define VERIFY_WRITE 1
3976
3977+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3978+
3979 /*
3980 * The fs value determines whether argument validity checking should be
3981 * performed or not. If get_fs() == USER_DS, checking is performed, with
3982@@ -327,52 +329,6 @@ do { \
3983 extern unsigned long __copy_tofrom_user(void __user *to,
3984 const void __user *from, unsigned long size);
3985
3986-#ifndef __powerpc64__
3987-
3988-static inline unsigned long copy_from_user(void *to,
3989- const void __user *from, unsigned long n)
3990-{
3991- unsigned long over;
3992-
3993- if (access_ok(VERIFY_READ, from, n))
3994- return __copy_tofrom_user((__force void __user *)to, from, n);
3995- if ((unsigned long)from < TASK_SIZE) {
3996- over = (unsigned long)from + n - TASK_SIZE;
3997- return __copy_tofrom_user((__force void __user *)to, from,
3998- n - over) + over;
3999- }
4000- return n;
4001-}
4002-
4003-static inline unsigned long copy_to_user(void __user *to,
4004- const void *from, unsigned long n)
4005-{
4006- unsigned long over;
4007-
4008- if (access_ok(VERIFY_WRITE, to, n))
4009- return __copy_tofrom_user(to, (__force void __user *)from, n);
4010- if ((unsigned long)to < TASK_SIZE) {
4011- over = (unsigned long)to + n - TASK_SIZE;
4012- return __copy_tofrom_user(to, (__force void __user *)from,
4013- n - over) + over;
4014- }
4015- return n;
4016-}
4017-
4018-#else /* __powerpc64__ */
4019-
4020-#define __copy_in_user(to, from, size) \
4021- __copy_tofrom_user((to), (from), (size))
4022-
4023-extern unsigned long copy_from_user(void *to, const void __user *from,
4024- unsigned long n);
4025-extern unsigned long copy_to_user(void __user *to, const void *from,
4026- unsigned long n);
4027-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4028- unsigned long n);
4029-
4030-#endif /* __powerpc64__ */
4031-
4032 static inline unsigned long __copy_from_user_inatomic(void *to,
4033 const void __user *from, unsigned long n)
4034 {
4035@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4036 if (ret == 0)
4037 return 0;
4038 }
4039+
4040+ if (!__builtin_constant_p(n))
4041+ check_object_size(to, n, false);
4042+
4043 return __copy_tofrom_user((__force void __user *)to, from, n);
4044 }
4045
4046@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4047 if (ret == 0)
4048 return 0;
4049 }
4050+
4051+ if (!__builtin_constant_p(n))
4052+ check_object_size(from, n, true);
4053+
4054 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4055 }
4056
4057@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4058 return __copy_to_user_inatomic(to, from, size);
4059 }
4060
4061+#ifndef __powerpc64__
4062+
4063+static inline unsigned long __must_check copy_from_user(void *to,
4064+ const void __user *from, unsigned long n)
4065+{
4066+ unsigned long over;
4067+
4068+ if ((long)n < 0)
4069+ return n;
4070+
4071+ if (access_ok(VERIFY_READ, from, n)) {
4072+ if (!__builtin_constant_p(n))
4073+ check_object_size(to, n, false);
4074+ return __copy_tofrom_user((__force void __user *)to, from, n);
4075+ }
4076+ if ((unsigned long)from < TASK_SIZE) {
4077+ over = (unsigned long)from + n - TASK_SIZE;
4078+ if (!__builtin_constant_p(n - over))
4079+ check_object_size(to, n - over, false);
4080+ return __copy_tofrom_user((__force void __user *)to, from,
4081+ n - over) + over;
4082+ }
4083+ return n;
4084+}
4085+
4086+static inline unsigned long __must_check copy_to_user(void __user *to,
4087+ const void *from, unsigned long n)
4088+{
4089+ unsigned long over;
4090+
4091+ if ((long)n < 0)
4092+ return n;
4093+
4094+ if (access_ok(VERIFY_WRITE, to, n)) {
4095+ if (!__builtin_constant_p(n))
4096+ check_object_size(from, n, true);
4097+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4098+ }
4099+ if ((unsigned long)to < TASK_SIZE) {
4100+ over = (unsigned long)to + n - TASK_SIZE;
 4101+ if (!__builtin_constant_p(n - over))
4102+ check_object_size(from, n - over, true);
4103+ return __copy_tofrom_user(to, (__force void __user *)from,
4104+ n - over) + over;
4105+ }
4106+ return n;
4107+}
4108+
4109+#else /* __powerpc64__ */
4110+
4111+#define __copy_in_user(to, from, size) \
4112+ __copy_tofrom_user((to), (from), (size))
4113+
4114+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4115+{
4116+ if ((long)n < 0 || n > INT_MAX)
4117+ return n;
4118+
4119+ if (!__builtin_constant_p(n))
4120+ check_object_size(to, n, false);
4121+
4122+ if (likely(access_ok(VERIFY_READ, from, n)))
4123+ n = __copy_from_user(to, from, n);
4124+ else
4125+ memset(to, 0, n);
4126+ return n;
4127+}
4128+
4129+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4130+{
4131+ if ((long)n < 0 || n > INT_MAX)
4132+ return n;
4133+
4134+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4135+ if (!__builtin_constant_p(n))
4136+ check_object_size(from, n, true);
4137+ n = __copy_to_user(to, from, n);
4138+ }
4139+ return n;
4140+}
4141+
4142+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4143+ unsigned long n);
4144+
4145+#endif /* __powerpc64__ */
4146+
4147 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4148
4149 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4150diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4151index 7215cc2..a9730c1 100644
4152--- a/arch/powerpc/kernel/exceptions-64e.S
4153+++ b/arch/powerpc/kernel/exceptions-64e.S
4154@@ -661,6 +661,7 @@ storage_fault_common:
4155 std r14,_DAR(r1)
4156 std r15,_DSISR(r1)
4157 addi r3,r1,STACK_FRAME_OVERHEAD
4158+ bl .save_nvgprs
4159 mr r4,r14
4160 mr r5,r15
4161 ld r14,PACA_EXGEN+EX_R14(r13)
4162@@ -669,8 +670,7 @@ storage_fault_common:
4163 cmpdi r3,0
4164 bne- 1f
4165 b .ret_from_except_lite
4166-1: bl .save_nvgprs
4167- mr r5,r3
4168+1: mr r5,r3
4169 addi r3,r1,STACK_FRAME_OVERHEAD
4170 ld r4,_DAR(r1)
4171 bl .bad_page_fault
4172diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4173index 8f880bc..c5bd2f3 100644
4174--- a/arch/powerpc/kernel/exceptions-64s.S
4175+++ b/arch/powerpc/kernel/exceptions-64s.S
4176@@ -890,10 +890,10 @@ handle_page_fault:
4177 11: ld r4,_DAR(r1)
4178 ld r5,_DSISR(r1)
4179 addi r3,r1,STACK_FRAME_OVERHEAD
4180+ bl .save_nvgprs
4181 bl .do_page_fault
4182 cmpdi r3,0
4183 beq+ 12f
4184- bl .save_nvgprs
4185 mr r5,r3
4186 addi r3,r1,STACK_FRAME_OVERHEAD
4187 lwz r4,_DAR(r1)
4188diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4189index 2e3200c..72095ce 100644
4190--- a/arch/powerpc/kernel/module_32.c
4191+++ b/arch/powerpc/kernel/module_32.c
4192@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4193 me->arch.core_plt_section = i;
4194 }
4195 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4196- printk("Module doesn't contain .plt or .init.plt sections.\n");
4197+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4198 return -ENOEXEC;
4199 }
4200
4201@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4202
4203 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4204 /* Init, or core PLT? */
4205- if (location >= mod->module_core
4206- && location < mod->module_core + mod->core_size)
4207+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4208+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4209 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4210- else
4211+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4212+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4213 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4214+ else {
4215+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4216+ return ~0UL;
4217+ }
4218
4219 /* Find this entry, or if that fails, the next avail. entry */
4220 while (entry->jump[0]) {
4221diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4222index 4937c96..70714b7 100644
4223--- a/arch/powerpc/kernel/process.c
4224+++ b/arch/powerpc/kernel/process.c
4225@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4226 * Lookup NIP late so we have the best change of getting the
4227 * above info out without failing
4228 */
4229- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4230- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4231+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4232+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4233 #endif
4234 show_stack(current, (unsigned long *) regs->gpr[1]);
4235 if (!user_mode(regs))
4236@@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4237 newsp = stack[0];
4238 ip = stack[STACK_FRAME_LR_SAVE];
4239 if (!firstframe || ip != lr) {
4240- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4241+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4242 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4243 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4244- printk(" (%pS)",
4245+ printk(" (%pA)",
4246 (void *)current->ret_stack[curr_frame].ret);
4247 curr_frame--;
4248 }
4249@@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4250 struct pt_regs *regs = (struct pt_regs *)
4251 (sp + STACK_FRAME_OVERHEAD);
4252 lr = regs->link;
4253- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4254+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4255 regs->trap, (void *)regs->nip, (void *)lr);
4256 firstframe = 1;
4257 }
4258@@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4259 }
4260
4261 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4262-
4263-unsigned long arch_align_stack(unsigned long sp)
4264-{
4265- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4266- sp -= get_random_int() & ~PAGE_MASK;
4267- return sp & ~0xf;
4268-}
4269-
4270-static inline unsigned long brk_rnd(void)
4271-{
4272- unsigned long rnd = 0;
4273-
4274- /* 8MB for 32bit, 1GB for 64bit */
4275- if (is_32bit_task())
4276- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4277- else
4278- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4279-
4280- return rnd << PAGE_SHIFT;
4281-}
4282-
4283-unsigned long arch_randomize_brk(struct mm_struct *mm)
4284-{
4285- unsigned long base = mm->brk;
4286- unsigned long ret;
4287-
4288-#ifdef CONFIG_PPC_STD_MMU_64
4289- /*
4290- * If we are using 1TB segments and we are allowed to randomise
4291- * the heap, we can put it above 1TB so it is backed by a 1TB
4292- * segment. Otherwise the heap will be in the bottom 1TB
4293- * which always uses 256MB segments and this may result in a
4294- * performance penalty.
4295- */
4296- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4297- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4298-#endif
4299-
4300- ret = PAGE_ALIGN(base + brk_rnd());
4301-
4302- if (ret < mm->brk)
4303- return mm->brk;
4304-
4305- return ret;
4306-}
4307-
4308-unsigned long randomize_et_dyn(unsigned long base)
4309-{
4310- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4311-
4312- if (ret < base)
4313- return base;
4314-
4315- return ret;
4316-}
4317diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4318index 8d8e028..c2aeb50 100644
4319--- a/arch/powerpc/kernel/ptrace.c
4320+++ b/arch/powerpc/kernel/ptrace.c
4321@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4322 return ret;
4323 }
4324
4325+#ifdef CONFIG_GRKERNSEC_SETXID
4326+extern void gr_delayed_cred_worker(void);
4327+#endif
4328+
4329 /*
4330 * We must return the syscall number to actually look up in the table.
4331 * This can be -1L to skip running any syscall at all.
4332@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4333
4334 secure_computing(regs->gpr[0]);
4335
4336+#ifdef CONFIG_GRKERNSEC_SETXID
4337+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4338+ gr_delayed_cred_worker();
4339+#endif
4340+
4341 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4342 tracehook_report_syscall_entry(regs))
4343 /*
4344@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4345 {
4346 int step;
4347
4348+#ifdef CONFIG_GRKERNSEC_SETXID
4349+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4350+ gr_delayed_cred_worker();
4351+#endif
4352+
4353 audit_syscall_exit(regs);
4354
4355 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4356diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4357index 45eb998..0cb36bc 100644
4358--- a/arch/powerpc/kernel/signal_32.c
4359+++ b/arch/powerpc/kernel/signal_32.c
4360@@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4361 /* Save user registers on the stack */
4362 frame = &rt_sf->uc.uc_mcontext;
4363 addr = frame;
4364- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4365+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4366 if (save_user_regs(regs, frame, 0, 1))
4367 goto badframe;
4368 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4369diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4370index 2692efd..6673d2e 100644
4371--- a/arch/powerpc/kernel/signal_64.c
4372+++ b/arch/powerpc/kernel/signal_64.c
4373@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4374 current->thread.fpscr.val = 0;
4375
4376 /* Set up to return from userspace. */
4377- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4378+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4379 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4380 } else {
4381 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4382diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4383index 1589723..cefe690 100644
4384--- a/arch/powerpc/kernel/traps.c
4385+++ b/arch/powerpc/kernel/traps.c
4386@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4387 return flags;
4388 }
4389
4390+extern void gr_handle_kernel_exploit(void);
4391+
4392 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4393 int signr)
4394 {
4395@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4396 panic("Fatal exception in interrupt");
4397 if (panic_on_oops)
4398 panic("Fatal exception");
4399+
4400+ gr_handle_kernel_exploit();
4401+
4402 do_exit(signr);
4403 }
4404
4405diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4406index 9eb5b9b..e45498a 100644
4407--- a/arch/powerpc/kernel/vdso.c
4408+++ b/arch/powerpc/kernel/vdso.c
4409@@ -34,6 +34,7 @@
4410 #include <asm/firmware.h>
4411 #include <asm/vdso.h>
4412 #include <asm/vdso_datapage.h>
4413+#include <asm/mman.h>
4414
4415 #include "setup.h"
4416
4417@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4418 vdso_base = VDSO32_MBASE;
4419 #endif
4420
4421- current->mm->context.vdso_base = 0;
4422+ current->mm->context.vdso_base = ~0UL;
4423
4424 /* vDSO has a problem and was disabled, just don't "enable" it for the
4425 * process
4426@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4427 vdso_base = get_unmapped_area(NULL, vdso_base,
4428 (vdso_pages << PAGE_SHIFT) +
4429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4430- 0, 0);
4431+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4432 if (IS_ERR_VALUE(vdso_base)) {
4433 rc = vdso_base;
4434 goto fail_mmapsem;
4435diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4436index 5eea6f3..5d10396 100644
4437--- a/arch/powerpc/lib/usercopy_64.c
4438+++ b/arch/powerpc/lib/usercopy_64.c
4439@@ -9,22 +9,6 @@
4440 #include <linux/module.h>
4441 #include <asm/uaccess.h>
4442
4443-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4444-{
4445- if (likely(access_ok(VERIFY_READ, from, n)))
4446- n = __copy_from_user(to, from, n);
4447- else
4448- memset(to, 0, n);
4449- return n;
4450-}
4451-
4452-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4453-{
4454- if (likely(access_ok(VERIFY_WRITE, to, n)))
4455- n = __copy_to_user(to, from, n);
4456- return n;
4457-}
4458-
4459 unsigned long copy_in_user(void __user *to, const void __user *from,
4460 unsigned long n)
4461 {
4462@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4463 return n;
4464 }
4465
4466-EXPORT_SYMBOL(copy_from_user);
4467-EXPORT_SYMBOL(copy_to_user);
4468 EXPORT_SYMBOL(copy_in_user);
4469
4470diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4471index 08ffcf5..a0ab912 100644
4472--- a/arch/powerpc/mm/fault.c
4473+++ b/arch/powerpc/mm/fault.c
4474@@ -32,6 +32,10 @@
4475 #include <linux/perf_event.h>
4476 #include <linux/magic.h>
4477 #include <linux/ratelimit.h>
4478+#include <linux/slab.h>
4479+#include <linux/pagemap.h>
4480+#include <linux/compiler.h>
4481+#include <linux/unistd.h>
4482
4483 #include <asm/firmware.h>
4484 #include <asm/page.h>
4485@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4486 }
4487 #endif
4488
4489+#ifdef CONFIG_PAX_PAGEEXEC
4490+/*
4491+ * PaX: decide what to do with offenders (regs->nip = fault address)
4492+ *
4493+ * returns 1 when task should be killed
4494+ */
4495+static int pax_handle_fetch_fault(struct pt_regs *regs)
4496+{
4497+ return 1;
4498+}
4499+
4500+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4501+{
4502+ unsigned long i;
4503+
4504+ printk(KERN_ERR "PAX: bytes at PC: ");
4505+ for (i = 0; i < 5; i++) {
4506+ unsigned int c;
4507+ if (get_user(c, (unsigned int __user *)pc+i))
4508+ printk(KERN_CONT "???????? ");
4509+ else
4510+ printk(KERN_CONT "%08x ", c);
4511+ }
4512+ printk("\n");
4513+}
4514+#endif
4515+
4516 /*
4517 * Check whether the instruction at regs->nip is a store using
4518 * an update addressing form which will update r1.
4519@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4520 * indicate errors in DSISR but can validly be set in SRR1.
4521 */
4522 if (trap == 0x400)
4523- error_code &= 0x48200000;
4524+ error_code &= 0x58200000;
4525 else
4526 is_write = error_code & DSISR_ISSTORE;
4527 #else
4528@@ -366,7 +397,7 @@ good_area:
4529 * "undefined". Of those that can be set, this is the only
4530 * one which seems bad.
4531 */
4532- if (error_code & 0x10000000)
4533+ if (error_code & DSISR_GUARDED)
4534 /* Guarded storage error. */
4535 goto bad_area;
4536 #endif /* CONFIG_8xx */
4537@@ -381,7 +412,7 @@ good_area:
4538 * processors use the same I/D cache coherency mechanism
4539 * as embedded.
4540 */
4541- if (error_code & DSISR_PROTFAULT)
4542+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4543 goto bad_area;
4544 #endif /* CONFIG_PPC_STD_MMU */
4545
4546@@ -463,6 +494,23 @@ bad_area:
4547 bad_area_nosemaphore:
4548 /* User mode accesses cause a SIGSEGV */
4549 if (user_mode(regs)) {
4550+
4551+#ifdef CONFIG_PAX_PAGEEXEC
4552+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4553+#ifdef CONFIG_PPC_STD_MMU
4554+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4555+#else
4556+ if (is_exec && regs->nip == address) {
4557+#endif
4558+ switch (pax_handle_fetch_fault(regs)) {
4559+ }
4560+
4561+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4562+ do_group_exit(SIGKILL);
4563+ }
4564+ }
4565+#endif
4566+
4567 _exception(SIGSEGV, regs, code, address);
4568 return 0;
4569 }
4570diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4571index 67a42ed..1c7210c 100644
4572--- a/arch/powerpc/mm/mmap_64.c
4573+++ b/arch/powerpc/mm/mmap_64.c
4574@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4575 */
4576 if (mmap_is_legacy()) {
4577 mm->mmap_base = TASK_UNMAPPED_BASE;
4578+
4579+#ifdef CONFIG_PAX_RANDMMAP
4580+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4581+ mm->mmap_base += mm->delta_mmap;
4582+#endif
4583+
4584 mm->get_unmapped_area = arch_get_unmapped_area;
4585 mm->unmap_area = arch_unmap_area;
4586 } else {
4587 mm->mmap_base = mmap_base();
4588+
4589+#ifdef CONFIG_PAX_RANDMMAP
4590+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4591+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4592+#endif
4593+
4594 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4595 mm->unmap_area = arch_unmap_area_topdown;
4596 }
4597diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4598index 73709f7..6b90313 100644
4599--- a/arch/powerpc/mm/slice.c
4600+++ b/arch/powerpc/mm/slice.c
4601@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4602 if ((mm->task_size - len) < addr)
4603 return 0;
4604 vma = find_vma(mm, addr);
4605- return (!vma || (addr + len) <= vma->vm_start);
4606+ return check_heap_stack_gap(vma, addr, len);
4607 }
4608
4609 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4610@@ -256,7 +256,7 @@ full_search:
4611 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4612 continue;
4613 }
4614- if (!vma || addr + len <= vma->vm_start) {
4615+ if (check_heap_stack_gap(vma, addr, len)) {
4616 /*
4617 * Remember the place where we stopped the search:
4618 */
4619@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4620 }
4621 }
4622
4623- addr = mm->mmap_base;
4624- while (addr > len) {
4625+ if (mm->mmap_base < len)
4626+ addr = -ENOMEM;
4627+ else
4628+ addr = mm->mmap_base - len;
4629+
4630+ while (!IS_ERR_VALUE(addr)) {
4631 /* Go down by chunk size */
4632- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4633+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4634
4635 /* Check for hit with different page size */
4636 mask = slice_range_to_mask(addr, len);
4637@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4638 * return with success:
4639 */
4640 vma = find_vma(mm, addr);
4641- if (!vma || (addr + len) <= vma->vm_start) {
4642+ if (check_heap_stack_gap(vma, addr, len)) {
4643 /* remember the address as a hint for next time */
4644 if (use_cache)
4645 mm->free_area_cache = addr;
4646@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4647 mm->cached_hole_size = vma->vm_start - addr;
4648
4649 /* try just below the current vma->vm_start */
4650- addr = vma->vm_start;
4651+ addr = skip_heap_stack_gap(vma, len);
4652 }
4653
4654 /*
4655@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4656 if (fixed && addr > (mm->task_size - len))
4657 return -EINVAL;
4658
4659+#ifdef CONFIG_PAX_RANDMMAP
4660+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4661+ addr = 0;
4662+#endif
4663+
4664 /* If hint, make sure it matches our alignment restrictions */
4665 if (!fixed && addr) {
4666 addr = _ALIGN_UP(addr, 1ul << pshift);
4667diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4668index 748347b..81bc6c7 100644
4669--- a/arch/s390/include/asm/atomic.h
4670+++ b/arch/s390/include/asm/atomic.h
4671@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4672 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4673 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4674
4675+#define atomic64_read_unchecked(v) atomic64_read(v)
4676+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4677+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4678+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4679+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4680+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4681+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4682+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4683+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4684+
4685 #define smp_mb__before_atomic_dec() smp_mb()
4686 #define smp_mb__after_atomic_dec() smp_mb()
4687 #define smp_mb__before_atomic_inc() smp_mb()
4688diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4689index 2a30d5a..5e5586f 100644
4690--- a/arch/s390/include/asm/cache.h
4691+++ b/arch/s390/include/asm/cache.h
4692@@ -11,8 +11,10 @@
4693 #ifndef __ARCH_S390_CACHE_H
4694 #define __ARCH_S390_CACHE_H
4695
4696-#define L1_CACHE_BYTES 256
4697+#include <linux/const.h>
4698+
4699 #define L1_CACHE_SHIFT 8
4700+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4701 #define NET_SKB_PAD 32
4702
4703 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4704diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4705index c4ee39f..352881b 100644
4706--- a/arch/s390/include/asm/elf.h
4707+++ b/arch/s390/include/asm/elf.h
4708@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4709 the loader. We need to make sure that it is out of the way of the program
4710 that it will "exec", and that there is sufficient room for the brk. */
4711
4712-extern unsigned long randomize_et_dyn(unsigned long base);
4713-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4714+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4715+
4716+#ifdef CONFIG_PAX_ASLR
4717+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4718+
4719+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4720+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4721+#endif
4722
4723 /* This yields a mask that user programs can use to figure out what
4724 instruction set this CPU supports. */
4725@@ -210,7 +216,4 @@ struct linux_binprm;
4726 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4727 int arch_setup_additional_pages(struct linux_binprm *, int);
4728
4729-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4730-#define arch_randomize_brk arch_randomize_brk
4731-
4732 #endif
4733diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4734index c4a93d6..4d2a9b4 100644
4735--- a/arch/s390/include/asm/exec.h
4736+++ b/arch/s390/include/asm/exec.h
4737@@ -7,6 +7,6 @@
4738 #ifndef __ASM_EXEC_H
4739 #define __ASM_EXEC_H
4740
4741-extern unsigned long arch_align_stack(unsigned long sp);
4742+#define arch_align_stack(x) ((x) & ~0xfUL)
4743
4744 #endif /* __ASM_EXEC_H */
4745diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4746index 8f2cada..43072c1 100644
4747--- a/arch/s390/include/asm/uaccess.h
4748+++ b/arch/s390/include/asm/uaccess.h
4749@@ -236,6 +236,10 @@ static inline unsigned long __must_check
4750 copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 might_fault();
4753+
4754+ if ((long)n < 0)
4755+ return n;
4756+
4757 if (access_ok(VERIFY_WRITE, to, n))
4758 n = __copy_to_user(to, from, n);
4759 return n;
4760@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4761 static inline unsigned long __must_check
4762 __copy_from_user(void *to, const void __user *from, unsigned long n)
4763 {
4764+ if ((long)n < 0)
4765+ return n;
4766+
4767 if (__builtin_constant_p(n) && (n <= 256))
4768 return uaccess.copy_from_user_small(n, from, to);
4769 else
4770@@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4771 static inline unsigned long __must_check
4772 copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774- unsigned int sz = __compiletime_object_size(to);
4775+ size_t sz = __compiletime_object_size(to);
4776
4777 might_fault();
4778- if (unlikely(sz != -1 && sz < n)) {
4779+
4780+ if ((long)n < 0)
4781+ return n;
4782+
4783+ if (unlikely(sz != (size_t)-1 && sz < n)) {
4784 copy_from_user_overflow();
4785 return n;
4786 }
4787diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4788index dfcb343..eda788a 100644
4789--- a/arch/s390/kernel/module.c
4790+++ b/arch/s390/kernel/module.c
4791@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4792
4793 /* Increase core size by size of got & plt and set start
4794 offsets for got and plt. */
4795- me->core_size = ALIGN(me->core_size, 4);
4796- me->arch.got_offset = me->core_size;
4797- me->core_size += me->arch.got_size;
4798- me->arch.plt_offset = me->core_size;
4799- me->core_size += me->arch.plt_size;
4800+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4801+ me->arch.got_offset = me->core_size_rw;
4802+ me->core_size_rw += me->arch.got_size;
4803+ me->arch.plt_offset = me->core_size_rx;
4804+ me->core_size_rx += me->arch.plt_size;
4805 return 0;
4806 }
4807
4808@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 if (info->got_initialized == 0) {
4810 Elf_Addr *gotent;
4811
4812- gotent = me->module_core + me->arch.got_offset +
4813+ gotent = me->module_core_rw + me->arch.got_offset +
4814 info->got_offset;
4815 *gotent = val;
4816 info->got_initialized = 1;
4817@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4818 else if (r_type == R_390_GOTENT ||
4819 r_type == R_390_GOTPLTENT)
4820 *(unsigned int *) loc =
4821- (val + (Elf_Addr) me->module_core - loc) >> 1;
4822+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4823 else if (r_type == R_390_GOT64 ||
4824 r_type == R_390_GOTPLT64)
4825 *(unsigned long *) loc = val;
4826@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4827 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4828 if (info->plt_initialized == 0) {
4829 unsigned int *ip;
4830- ip = me->module_core + me->arch.plt_offset +
4831+ ip = me->module_core_rx + me->arch.plt_offset +
4832 info->plt_offset;
4833 #ifndef CONFIG_64BIT
4834 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4835@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4836 val - loc + 0xffffUL < 0x1ffffeUL) ||
4837 (r_type == R_390_PLT32DBL &&
4838 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4839- val = (Elf_Addr) me->module_core +
4840+ val = (Elf_Addr) me->module_core_rx +
4841 me->arch.plt_offset +
4842 info->plt_offset;
4843 val += rela->r_addend - loc;
4844@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4845 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4846 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4847 val = val + rela->r_addend -
4848- ((Elf_Addr) me->module_core + me->arch.got_offset);
4849+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4850 if (r_type == R_390_GOTOFF16)
4851 *(unsigned short *) loc = val;
4852 else if (r_type == R_390_GOTOFF32)
4853@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4854 break;
4855 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4856 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4857- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4858+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4859 rela->r_addend - loc;
4860 if (r_type == R_390_GOTPC)
4861 *(unsigned int *) loc = val;
4862diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4863index 60055ce..ee4b252 100644
4864--- a/arch/s390/kernel/process.c
4865+++ b/arch/s390/kernel/process.c
4866@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4867 }
4868 return 0;
4869 }
4870-
4871-unsigned long arch_align_stack(unsigned long sp)
4872-{
4873- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4874- sp -= get_random_int() & ~PAGE_MASK;
4875- return sp & ~0xf;
4876-}
4877-
4878-static inline unsigned long brk_rnd(void)
4879-{
4880- /* 8MB for 32bit, 1GB for 64bit */
4881- if (is_32bit_task())
4882- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4883- else
4884- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4885-}
4886-
4887-unsigned long arch_randomize_brk(struct mm_struct *mm)
4888-{
4889- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4890-
4891- if (ret < mm->brk)
4892- return mm->brk;
4893- return ret;
4894-}
4895-
4896-unsigned long randomize_et_dyn(unsigned long base)
4897-{
4898- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4899-
4900- if (!(current->flags & PF_RANDOMIZE))
4901- return base;
4902- if (ret < base)
4903- return base;
4904- return ret;
4905-}
4906diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4907index 2857c48..d047481 100644
4908--- a/arch/s390/mm/mmap.c
4909+++ b/arch/s390/mm/mmap.c
4910@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4911 */
4912 if (mmap_is_legacy()) {
4913 mm->mmap_base = TASK_UNMAPPED_BASE;
4914+
4915+#ifdef CONFIG_PAX_RANDMMAP
4916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4917+ mm->mmap_base += mm->delta_mmap;
4918+#endif
4919+
4920 mm->get_unmapped_area = arch_get_unmapped_area;
4921 mm->unmap_area = arch_unmap_area;
4922 } else {
4923 mm->mmap_base = mmap_base();
4924+
4925+#ifdef CONFIG_PAX_RANDMMAP
4926+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4927+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4928+#endif
4929+
4930 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4931 mm->unmap_area = arch_unmap_area_topdown;
4932 }
4933@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4934 */
4935 if (mmap_is_legacy()) {
4936 mm->mmap_base = TASK_UNMAPPED_BASE;
4937+
4938+#ifdef CONFIG_PAX_RANDMMAP
4939+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4940+ mm->mmap_base += mm->delta_mmap;
4941+#endif
4942+
4943 mm->get_unmapped_area = s390_get_unmapped_area;
4944 mm->unmap_area = arch_unmap_area;
4945 } else {
4946 mm->mmap_base = mmap_base();
4947+
4948+#ifdef CONFIG_PAX_RANDMMAP
4949+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4950+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4951+#endif
4952+
4953 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4954 mm->unmap_area = arch_unmap_area_topdown;
4955 }
4956diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4957index ae3d59f..f65f075 100644
4958--- a/arch/score/include/asm/cache.h
4959+++ b/arch/score/include/asm/cache.h
4960@@ -1,7 +1,9 @@
4961 #ifndef _ASM_SCORE_CACHE_H
4962 #define _ASM_SCORE_CACHE_H
4963
4964+#include <linux/const.h>
4965+
4966 #define L1_CACHE_SHIFT 4
4967-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4968+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4969
4970 #endif /* _ASM_SCORE_CACHE_H */
4971diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4972index f9f3cd5..58ff438 100644
4973--- a/arch/score/include/asm/exec.h
4974+++ b/arch/score/include/asm/exec.h
4975@@ -1,6 +1,6 @@
4976 #ifndef _ASM_SCORE_EXEC_H
4977 #define _ASM_SCORE_EXEC_H
4978
4979-extern unsigned long arch_align_stack(unsigned long sp);
4980+#define arch_align_stack(x) (x)
4981
4982 #endif /* _ASM_SCORE_EXEC_H */
4983diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4984index 2707023..1c2a3b7 100644
4985--- a/arch/score/kernel/process.c
4986+++ b/arch/score/kernel/process.c
4987@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4988
4989 return task_pt_regs(task)->cp0_epc;
4990 }
4991-
4992-unsigned long arch_align_stack(unsigned long sp)
4993-{
4994- return sp;
4995-}
4996diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4997index ef9e555..331bd29 100644
4998--- a/arch/sh/include/asm/cache.h
4999+++ b/arch/sh/include/asm/cache.h
5000@@ -9,10 +9,11 @@
5001 #define __ASM_SH_CACHE_H
5002 #ifdef __KERNEL__
5003
5004+#include <linux/const.h>
5005 #include <linux/init.h>
5006 #include <cpu/cache.h>
5007
5008-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5009+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5010
5011 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5012
5013diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5014index afeb710..d1d1289 100644
5015--- a/arch/sh/mm/mmap.c
5016+++ b/arch/sh/mm/mmap.c
5017@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5018 addr = PAGE_ALIGN(addr);
5019
5020 vma = find_vma(mm, addr);
5021- if (TASK_SIZE - len >= addr &&
5022- (!vma || addr + len <= vma->vm_start))
5023+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5024 return addr;
5025 }
5026
5027@@ -106,7 +105,7 @@ full_search:
5028 }
5029 return -ENOMEM;
5030 }
5031- if (likely(!vma || addr + len <= vma->vm_start)) {
5032+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5033 /*
5034 * Remember the place where we stopped the search:
5035 */
5036@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5037 addr = PAGE_ALIGN(addr);
5038
5039 vma = find_vma(mm, addr);
5040- if (TASK_SIZE - len >= addr &&
5041- (!vma || addr + len <= vma->vm_start))
5042+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5043 return addr;
5044 }
5045
5046@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5047 /* make sure it can fit in the remaining address space */
5048 if (likely(addr > len)) {
5049 vma = find_vma(mm, addr-len);
5050- if (!vma || addr <= vma->vm_start) {
5051+ if (check_heap_stack_gap(vma, addr - len, len)) {
5052 /* remember the address as a hint for next time */
5053 return (mm->free_area_cache = addr-len);
5054 }
5055@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5056 if (unlikely(mm->mmap_base < len))
5057 goto bottomup;
5058
5059- addr = mm->mmap_base-len;
5060- if (do_colour_align)
5061- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5062+ addr = mm->mmap_base - len;
5063
5064 do {
5065+ if (do_colour_align)
5066+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5067 /*
5068 * Lookup failure means no vma is above this address,
5069 * else if new region fits below vma->vm_start,
5070 * return with success:
5071 */
5072 vma = find_vma(mm, addr);
5073- if (likely(!vma || addr+len <= vma->vm_start)) {
5074+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5075 /* remember the address as a hint for next time */
5076 return (mm->free_area_cache = addr);
5077 }
5078@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5079 mm->cached_hole_size = vma->vm_start - addr;
5080
5081 /* try just below the current vma->vm_start */
5082- addr = vma->vm_start-len;
5083- if (do_colour_align)
5084- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5085- } while (likely(len < vma->vm_start));
5086+ addr = skip_heap_stack_gap(vma, len);
5087+ } while (!IS_ERR_VALUE(addr));
5088
5089 bottomup:
5090 /*
5091diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5092index eddcfb3..b117d90 100644
5093--- a/arch/sparc/Makefile
5094+++ b/arch/sparc/Makefile
5095@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5096 # Export what is needed by arch/sparc/boot/Makefile
5097 export VMLINUX_INIT VMLINUX_MAIN
5098 VMLINUX_INIT := $(head-y) $(init-y)
5099-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5100+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5101 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5102 VMLINUX_MAIN += $(drivers-y) $(net-y)
5103
5104diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5105index ce35a1c..2e7b8f9 100644
5106--- a/arch/sparc/include/asm/atomic_64.h
5107+++ b/arch/sparc/include/asm/atomic_64.h
5108@@ -14,18 +14,40 @@
5109 #define ATOMIC64_INIT(i) { (i) }
5110
5111 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5112+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5113+{
5114+ return v->counter;
5115+}
5116 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5117+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5118+{
5119+ return v->counter;
5120+}
5121
5122 #define atomic_set(v, i) (((v)->counter) = i)
5123+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5124+{
5125+ v->counter = i;
5126+}
5127 #define atomic64_set(v, i) (((v)->counter) = i)
5128+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5129+{
5130+ v->counter = i;
5131+}
5132
5133 extern void atomic_add(int, atomic_t *);
5134+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5135 extern void atomic64_add(long, atomic64_t *);
5136+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5137 extern void atomic_sub(int, atomic_t *);
5138+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5139 extern void atomic64_sub(long, atomic64_t *);
5140+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5141
5142 extern int atomic_add_ret(int, atomic_t *);
5143+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5144 extern long atomic64_add_ret(long, atomic64_t *);
5145+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5146 extern int atomic_sub_ret(int, atomic_t *);
5147 extern long atomic64_sub_ret(long, atomic64_t *);
5148
5149@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5150 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5151
5152 #define atomic_inc_return(v) atomic_add_ret(1, v)
5153+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5154+{
5155+ return atomic_add_ret_unchecked(1, v);
5156+}
5157 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5158+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5159+{
5160+ return atomic64_add_ret_unchecked(1, v);
5161+}
5162
5163 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5164 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5165
5166 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5167+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5168+{
5169+ return atomic_add_ret_unchecked(i, v);
5170+}
5171 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5172+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5173+{
5174+ return atomic64_add_ret_unchecked(i, v);
5175+}
5176
5177 /*
5178 * atomic_inc_and_test - increment and test
5179@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5180 * other cases.
5181 */
5182 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5183+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5184+{
5185+ return atomic_inc_return_unchecked(v) == 0;
5186+}
5187 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5188
5189 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5190@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5191 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5192
5193 #define atomic_inc(v) atomic_add(1, v)
5194+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5195+{
5196+ atomic_add_unchecked(1, v);
5197+}
5198 #define atomic64_inc(v) atomic64_add(1, v)
5199+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5200+{
5201+ atomic64_add_unchecked(1, v);
5202+}
5203
5204 #define atomic_dec(v) atomic_sub(1, v)
5205+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5206+{
5207+ atomic_sub_unchecked(1, v);
5208+}
5209 #define atomic64_dec(v) atomic64_sub(1, v)
5210+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5211+{
5212+ atomic64_sub_unchecked(1, v);
5213+}
5214
5215 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5216 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5217
5218 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5219+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5220+{
5221+ return cmpxchg(&v->counter, old, new);
5222+}
5223 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5224+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5225+{
5226+ return xchg(&v->counter, new);
5227+}
5228
5229 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5230 {
5231- int c, old;
5232+ int c, old, new;
5233 c = atomic_read(v);
5234 for (;;) {
5235- if (unlikely(c == (u)))
5236+ if (unlikely(c == u))
5237 break;
5238- old = atomic_cmpxchg((v), c, c + (a));
5239+
5240+ asm volatile("addcc %2, %0, %0\n"
5241+
5242+#ifdef CONFIG_PAX_REFCOUNT
5243+ "tvs %%icc, 6\n"
5244+#endif
5245+
5246+ : "=r" (new)
5247+ : "0" (c), "ir" (a)
5248+ : "cc");
5249+
5250+ old = atomic_cmpxchg(v, c, new);
5251 if (likely(old == c))
5252 break;
5253 c = old;
5254@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5255 #define atomic64_cmpxchg(v, o, n) \
5256 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5257 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5258+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5259+{
5260+ return xchg(&v->counter, new);
5261+}
5262
5263 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5264 {
5265- long c, old;
5266+ long c, old, new;
5267 c = atomic64_read(v);
5268 for (;;) {
5269- if (unlikely(c == (u)))
5270+ if (unlikely(c == u))
5271 break;
5272- old = atomic64_cmpxchg((v), c, c + (a));
5273+
5274+ asm volatile("addcc %2, %0, %0\n"
5275+
5276+#ifdef CONFIG_PAX_REFCOUNT
5277+ "tvs %%xcc, 6\n"
5278+#endif
5279+
5280+ : "=r" (new)
5281+ : "0" (c), "ir" (a)
5282+ : "cc");
5283+
5284+ old = atomic64_cmpxchg(v, c, new);
5285 if (likely(old == c))
5286 break;
5287 c = old;
5288 }
5289- return c != (u);
5290+ return c != u;
5291 }
5292
5293 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5294diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5295index 69358b5..9d0d492 100644
5296--- a/arch/sparc/include/asm/cache.h
5297+++ b/arch/sparc/include/asm/cache.h
5298@@ -7,10 +7,12 @@
5299 #ifndef _SPARC_CACHE_H
5300 #define _SPARC_CACHE_H
5301
5302+#include <linux/const.h>
5303+
5304 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5305
5306 #define L1_CACHE_SHIFT 5
5307-#define L1_CACHE_BYTES 32
5308+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5309
5310 #ifdef CONFIG_SPARC32
5311 #define SMP_CACHE_BYTES_SHIFT 5
5312diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5313index 4269ca6..e3da77f 100644
5314--- a/arch/sparc/include/asm/elf_32.h
5315+++ b/arch/sparc/include/asm/elf_32.h
5316@@ -114,6 +114,13 @@ typedef struct {
5317
5318 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5319
5320+#ifdef CONFIG_PAX_ASLR
5321+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5322+
5323+#define PAX_DELTA_MMAP_LEN 16
5324+#define PAX_DELTA_STACK_LEN 16
5325+#endif
5326+
5327 /* This yields a mask that user programs can use to figure out what
5328 instruction set this cpu supports. This can NOT be done in userspace
5329 on Sparc. */
5330diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5331index 7df8b7f..4946269 100644
5332--- a/arch/sparc/include/asm/elf_64.h
5333+++ b/arch/sparc/include/asm/elf_64.h
5334@@ -180,6 +180,13 @@ typedef struct {
5335 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5336 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5337
5338+#ifdef CONFIG_PAX_ASLR
5339+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5340+
5341+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5342+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5343+#endif
5344+
5345 extern unsigned long sparc64_elf_hwcap;
5346 #define ELF_HWCAP sparc64_elf_hwcap
5347
5348diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5349index ca2b344..c6084f89 100644
5350--- a/arch/sparc/include/asm/pgalloc_32.h
5351+++ b/arch/sparc/include/asm/pgalloc_32.h
5352@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5353 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5354 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5355 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5356+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5357
5358 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5359 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5360diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5361index 40b2d7a..22a665b 100644
5362--- a/arch/sparc/include/asm/pgalloc_64.h
5363+++ b/arch/sparc/include/asm/pgalloc_64.h
5364@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5365 }
5366
5367 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5368+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5371 {
5372diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5373index 3d71018..48a11c5 100644
5374--- a/arch/sparc/include/asm/pgtable_32.h
5375+++ b/arch/sparc/include/asm/pgtable_32.h
5376@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5377 BTFIXUPDEF_INT(page_none)
5378 BTFIXUPDEF_INT(page_copy)
5379 BTFIXUPDEF_INT(page_readonly)
5380+
5381+#ifdef CONFIG_PAX_PAGEEXEC
5382+BTFIXUPDEF_INT(page_shared_noexec)
5383+BTFIXUPDEF_INT(page_copy_noexec)
5384+BTFIXUPDEF_INT(page_readonly_noexec)
5385+#endif
5386+
5387 BTFIXUPDEF_INT(page_kernel)
5388
5389 #define PMD_SHIFT SUN4C_PMD_SHIFT
5390@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5391 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5392 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5393
5394+#ifdef CONFIG_PAX_PAGEEXEC
5395+extern pgprot_t PAGE_SHARED_NOEXEC;
5396+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5397+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5398+#else
5399+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5400+# define PAGE_COPY_NOEXEC PAGE_COPY
5401+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5402+#endif
5403+
5404 extern unsigned long page_kernel;
5405
5406 #ifdef MODULE
5407diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5408index f6ae2b2..b03ffc7 100644
5409--- a/arch/sparc/include/asm/pgtsrmmu.h
5410+++ b/arch/sparc/include/asm/pgtsrmmu.h
5411@@ -115,6 +115,13 @@
5412 SRMMU_EXEC | SRMMU_REF)
5413 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5414 SRMMU_EXEC | SRMMU_REF)
5415+
5416+#ifdef CONFIG_PAX_PAGEEXEC
5417+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5418+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5419+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5420+#endif
5421+
5422 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5423 SRMMU_DIRTY | SRMMU_REF)
5424
5425diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5426index 9689176..63c18ea 100644
5427--- a/arch/sparc/include/asm/spinlock_64.h
5428+++ b/arch/sparc/include/asm/spinlock_64.h
5429@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5430
5431 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5432
5433-static void inline arch_read_lock(arch_rwlock_t *lock)
5434+static inline void arch_read_lock(arch_rwlock_t *lock)
5435 {
5436 unsigned long tmp1, tmp2;
5437
5438 __asm__ __volatile__ (
5439 "1: ldsw [%2], %0\n"
5440 " brlz,pn %0, 2f\n"
5441-"4: add %0, 1, %1\n"
5442+"4: addcc %0, 1, %1\n"
5443+
5444+#ifdef CONFIG_PAX_REFCOUNT
5445+" tvs %%icc, 6\n"
5446+#endif
5447+
5448 " cas [%2], %0, %1\n"
5449 " cmp %0, %1\n"
5450 " bne,pn %%icc, 1b\n"
5451@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5452 " .previous"
5453 : "=&r" (tmp1), "=&r" (tmp2)
5454 : "r" (lock)
5455- : "memory");
5456+ : "memory", "cc");
5457 }
5458
5459-static int inline arch_read_trylock(arch_rwlock_t *lock)
5460+static inline int arch_read_trylock(arch_rwlock_t *lock)
5461 {
5462 int tmp1, tmp2;
5463
5464@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5465 "1: ldsw [%2], %0\n"
5466 " brlz,a,pn %0, 2f\n"
5467 " mov 0, %0\n"
5468-" add %0, 1, %1\n"
5469+" addcc %0, 1, %1\n"
5470+
5471+#ifdef CONFIG_PAX_REFCOUNT
5472+" tvs %%icc, 6\n"
5473+#endif
5474+
5475 " cas [%2], %0, %1\n"
5476 " cmp %0, %1\n"
5477 " bne,pn %%icc, 1b\n"
5478@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 return tmp1;
5480 }
5481
5482-static void inline arch_read_unlock(arch_rwlock_t *lock)
5483+static inline void arch_read_unlock(arch_rwlock_t *lock)
5484 {
5485 unsigned long tmp1, tmp2;
5486
5487 __asm__ __volatile__(
5488 "1: lduw [%2], %0\n"
5489-" sub %0, 1, %1\n"
5490+" subcc %0, 1, %1\n"
5491+
5492+#ifdef CONFIG_PAX_REFCOUNT
5493+" tvs %%icc, 6\n"
5494+#endif
5495+
5496 " cas [%2], %0, %1\n"
5497 " cmp %0, %1\n"
5498 " bne,pn %%xcc, 1b\n"
5499@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5500 : "memory");
5501 }
5502
5503-static void inline arch_write_lock(arch_rwlock_t *lock)
5504+static inline void arch_write_lock(arch_rwlock_t *lock)
5505 {
5506 unsigned long mask, tmp1, tmp2;
5507
5508@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5509 : "memory");
5510 }
5511
5512-static void inline arch_write_unlock(arch_rwlock_t *lock)
5513+static inline void arch_write_unlock(arch_rwlock_t *lock)
5514 {
5515 __asm__ __volatile__(
5516 " stw %%g0, [%0]"
5517@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5518 : "memory");
5519 }
5520
5521-static int inline arch_write_trylock(arch_rwlock_t *lock)
5522+static inline int arch_write_trylock(arch_rwlock_t *lock)
5523 {
5524 unsigned long mask, tmp1, tmp2, result;
5525
5526diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5527index c2a1080..21ed218 100644
5528--- a/arch/sparc/include/asm/thread_info_32.h
5529+++ b/arch/sparc/include/asm/thread_info_32.h
5530@@ -50,6 +50,8 @@ struct thread_info {
5531 unsigned long w_saved;
5532
5533 struct restart_block restart_block;
5534+
5535+ unsigned long lowest_stack;
5536 };
5537
5538 /*
5539diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5540index 01d057f..13a7d2f 100644
5541--- a/arch/sparc/include/asm/thread_info_64.h
5542+++ b/arch/sparc/include/asm/thread_info_64.h
5543@@ -63,6 +63,8 @@ struct thread_info {
5544 struct pt_regs *kern_una_regs;
5545 unsigned int kern_una_insn;
5546
5547+ unsigned long lowest_stack;
5548+
5549 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5550 };
5551
5552@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5553 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5554 /* flag bit 6 is available */
5555 #define TIF_32BIT 7 /* 32-bit binary */
5556-/* flag bit 8 is available */
5557+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5558 #define TIF_SECCOMP 9 /* secure computing */
5559 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5560 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5561+
5562 /* NOTE: Thread flags >= 12 should be ones we have no interest
5563 * in using in assembly, else we can't use the mask as
5564 * an immediate value in instructions such as andcc.
5565@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5566 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5567 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5568 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5569+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5570
5571 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5572 _TIF_DO_NOTIFY_RESUME_MASK | \
5573 _TIF_NEED_RESCHED)
5574 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5575
5576+#define _TIF_WORK_SYSCALL \
5577+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5578+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5579+
5580+
5581 /*
5582 * Thread-synchronous status.
5583 *
5584diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5585index e88fbe5..96b0ce5 100644
5586--- a/arch/sparc/include/asm/uaccess.h
5587+++ b/arch/sparc/include/asm/uaccess.h
5588@@ -1,5 +1,13 @@
5589 #ifndef ___ASM_SPARC_UACCESS_H
5590 #define ___ASM_SPARC_UACCESS_H
5591+
5592+#ifdef __KERNEL__
5593+#ifndef __ASSEMBLY__
5594+#include <linux/types.h>
5595+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5596+#endif
5597+#endif
5598+
5599 #if defined(__sparc__) && defined(__arch64__)
5600 #include <asm/uaccess_64.h>
5601 #else
5602diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5603index 8303ac4..07f333d 100644
5604--- a/arch/sparc/include/asm/uaccess_32.h
5605+++ b/arch/sparc/include/asm/uaccess_32.h
5606@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5607
5608 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5609 {
5610- if (n && __access_ok((unsigned long) to, n))
5611+ if ((long)n < 0)
5612+ return n;
5613+
5614+ if (n && __access_ok((unsigned long) to, n)) {
5615+ if (!__builtin_constant_p(n))
5616+ check_object_size(from, n, true);
5617 return __copy_user(to, (__force void __user *) from, n);
5618- else
5619+ } else
5620 return n;
5621 }
5622
5623 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5624 {
5625+ if ((long)n < 0)
5626+ return n;
5627+
5628+ if (!__builtin_constant_p(n))
5629+ check_object_size(from, n, true);
5630+
5631 return __copy_user(to, (__force void __user *) from, n);
5632 }
5633
5634 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5635 {
5636- if (n && __access_ok((unsigned long) from, n))
5637+ if ((long)n < 0)
5638+ return n;
5639+
5640+ if (n && __access_ok((unsigned long) from, n)) {
5641+ if (!__builtin_constant_p(n))
5642+ check_object_size(to, n, false);
5643 return __copy_user((__force void __user *) to, from, n);
5644- else
5645+ } else
5646 return n;
5647 }
5648
5649 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5650 {
5651+ if ((long)n < 0)
5652+ return n;
5653+
5654 return __copy_user((__force void __user *) to, from, n);
5655 }
5656
5657diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5658index a1091afb..380228e 100644
5659--- a/arch/sparc/include/asm/uaccess_64.h
5660+++ b/arch/sparc/include/asm/uaccess_64.h
5661@@ -10,6 +10,7 @@
5662 #include <linux/compiler.h>
5663 #include <linux/string.h>
5664 #include <linux/thread_info.h>
5665+#include <linux/kernel.h>
5666 #include <asm/asi.h>
5667 #include <asm/spitfire.h>
5668 #include <asm-generic/uaccess-unaligned.h>
5669@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5670 static inline unsigned long __must_check
5671 copy_from_user(void *to, const void __user *from, unsigned long size)
5672 {
5673- unsigned long ret = ___copy_from_user(to, from, size);
5674+ unsigned long ret;
5675
5676+ if ((long)size < 0 || size > INT_MAX)
5677+ return size;
5678+
5679+ if (!__builtin_constant_p(size))
5680+ check_object_size(to, size, false);
5681+
5682+ ret = ___copy_from_user(to, from, size);
5683 if (unlikely(ret))
5684 ret = copy_from_user_fixup(to, from, size);
5685
5686@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5687 static inline unsigned long __must_check
5688 copy_to_user(void __user *to, const void *from, unsigned long size)
5689 {
5690- unsigned long ret = ___copy_to_user(to, from, size);
5691+ unsigned long ret;
5692
5693+ if ((long)size < 0 || size > INT_MAX)
5694+ return size;
5695+
5696+ if (!__builtin_constant_p(size))
5697+ check_object_size(from, size, true);
5698+
5699+ ret = ___copy_to_user(to, from, size);
5700 if (unlikely(ret))
5701 ret = copy_to_user_fixup(to, from, size);
5702 return ret;
5703diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5704index cb85458..e063f17 100644
5705--- a/arch/sparc/kernel/Makefile
5706+++ b/arch/sparc/kernel/Makefile
5707@@ -3,7 +3,7 @@
5708 #
5709
5710 asflags-y := -ansi
5711-ccflags-y := -Werror
5712+#ccflags-y := -Werror
5713
5714 extra-y := head_$(BITS).o
5715 extra-y += init_task.o
5716diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5717index efa0754..74b03fe 100644
5718--- a/arch/sparc/kernel/process_32.c
5719+++ b/arch/sparc/kernel/process_32.c
5720@@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5721 rw->ins[4], rw->ins[5],
5722 rw->ins[6],
5723 rw->ins[7]);
5724- printk("%pS\n", (void *) rw->ins[7]);
5725+ printk("%pA\n", (void *) rw->ins[7]);
5726 rw = (struct reg_window32 *) rw->ins[6];
5727 }
5728 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5729@@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5730
5731 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5732 r->psr, r->pc, r->npc, r->y, print_tainted());
5733- printk("PC: <%pS>\n", (void *) r->pc);
5734+ printk("PC: <%pA>\n", (void *) r->pc);
5735 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5736 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5737 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5738 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5739 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5740 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5741- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5742+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5743
5744 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5745 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5746@@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5747 rw = (struct reg_window32 *) fp;
5748 pc = rw->ins[7];
5749 printk("[%08lx : ", pc);
5750- printk("%pS ] ", (void *) pc);
5751+ printk("%pA ] ", (void *) pc);
5752 fp = rw->ins[6];
5753 } while (++count < 16);
5754 printk("\n");
5755diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5756index aff0c72..9067b39 100644
5757--- a/arch/sparc/kernel/process_64.c
5758+++ b/arch/sparc/kernel/process_64.c
5759@@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5760 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5761 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5762 if (regs->tstate & TSTATE_PRIV)
5763- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5764+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5765 }
5766
5767 void show_regs(struct pt_regs *regs)
5768 {
5769 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5770 regs->tpc, regs->tnpc, regs->y, print_tainted());
5771- printk("TPC: <%pS>\n", (void *) regs->tpc);
5772+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5773 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5774 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5775 regs->u_regs[3]);
5776@@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5777 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5778 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5779 regs->u_regs[15]);
5780- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5781+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5782 show_regwindow(regs);
5783 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5784 }
5785@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5786 ((tp && tp->task) ? tp->task->pid : -1));
5787
5788 if (gp->tstate & TSTATE_PRIV) {
5789- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5790+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5791 (void *) gp->tpc,
5792 (void *) gp->o7,
5793 (void *) gp->i7,
5794diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5795index 6f97c07..b1300ec 100644
5796--- a/arch/sparc/kernel/ptrace_64.c
5797+++ b/arch/sparc/kernel/ptrace_64.c
5798@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5799 return ret;
5800 }
5801
5802+#ifdef CONFIG_GRKERNSEC_SETXID
5803+extern void gr_delayed_cred_worker(void);
5804+#endif
5805+
5806 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5807 {
5808 int ret = 0;
5809@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5810 /* do the secure computing check first */
5811 secure_computing(regs->u_regs[UREG_G1]);
5812
5813+#ifdef CONFIG_GRKERNSEC_SETXID
5814+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5815+ gr_delayed_cred_worker();
5816+#endif
5817+
5818 if (test_thread_flag(TIF_SYSCALL_TRACE))
5819 ret = tracehook_report_syscall_entry(regs);
5820
5821@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5822
5823 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5824 {
5825+#ifdef CONFIG_GRKERNSEC_SETXID
5826+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5827+ gr_delayed_cred_worker();
5828+#endif
5829+
5830 audit_syscall_exit(regs);
5831
5832 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5833diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5834index 42b282f..28ce9f2 100644
5835--- a/arch/sparc/kernel/sys_sparc_32.c
5836+++ b/arch/sparc/kernel/sys_sparc_32.c
5837@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5838 if (ARCH_SUN4C && len > 0x20000000)
5839 return -ENOMEM;
5840 if (!addr)
5841- addr = TASK_UNMAPPED_BASE;
5842+ addr = current->mm->mmap_base;
5843
5844 if (flags & MAP_SHARED)
5845 addr = COLOUR_ALIGN(addr);
5846@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5847 }
5848 if (TASK_SIZE - PAGE_SIZE - len < addr)
5849 return -ENOMEM;
5850- if (!vmm || addr + len <= vmm->vm_start)
5851+ if (check_heap_stack_gap(vmm, addr, len))
5852 return addr;
5853 addr = vmm->vm_end;
5854 if (flags & MAP_SHARED)
5855diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5856index 3ee51f1..2ba4913 100644
5857--- a/arch/sparc/kernel/sys_sparc_64.c
5858+++ b/arch/sparc/kernel/sys_sparc_64.c
5859@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5860 /* We do not accept a shared mapping if it would violate
5861 * cache aliasing constraints.
5862 */
5863- if ((flags & MAP_SHARED) &&
5864+ if ((filp || (flags & MAP_SHARED)) &&
5865 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5866 return -EINVAL;
5867 return addr;
5868@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5869 if (filp || (flags & MAP_SHARED))
5870 do_color_align = 1;
5871
5872+#ifdef CONFIG_PAX_RANDMMAP
5873+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5874+#endif
5875+
5876 if (addr) {
5877 if (do_color_align)
5878 addr = COLOUR_ALIGN(addr, pgoff);
5879@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5880 addr = PAGE_ALIGN(addr);
5881
5882 vma = find_vma(mm, addr);
5883- if (task_size - len >= addr &&
5884- (!vma || addr + len <= vma->vm_start))
5885+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5886 return addr;
5887 }
5888
5889 if (len > mm->cached_hole_size) {
5890- start_addr = addr = mm->free_area_cache;
5891+ start_addr = addr = mm->free_area_cache;
5892 } else {
5893- start_addr = addr = TASK_UNMAPPED_BASE;
5894+ start_addr = addr = mm->mmap_base;
5895 mm->cached_hole_size = 0;
5896 }
5897
5898@@ -174,14 +177,14 @@ full_search:
5899 vma = find_vma(mm, VA_EXCLUDE_END);
5900 }
5901 if (unlikely(task_size < addr)) {
5902- if (start_addr != TASK_UNMAPPED_BASE) {
5903- start_addr = addr = TASK_UNMAPPED_BASE;
5904+ if (start_addr != mm->mmap_base) {
5905+ start_addr = addr = mm->mmap_base;
5906 mm->cached_hole_size = 0;
5907 goto full_search;
5908 }
5909 return -ENOMEM;
5910 }
5911- if (likely(!vma || addr + len <= vma->vm_start)) {
5912+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5913 /*
5914 * Remember the place where we stopped the search:
5915 */
5916@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5917 /* We do not accept a shared mapping if it would violate
5918 * cache aliasing constraints.
5919 */
5920- if ((flags & MAP_SHARED) &&
5921+ if ((filp || (flags & MAP_SHARED)) &&
5922 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5923 return -EINVAL;
5924 return addr;
5925@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5926 addr = PAGE_ALIGN(addr);
5927
5928 vma = find_vma(mm, addr);
5929- if (task_size - len >= addr &&
5930- (!vma || addr + len <= vma->vm_start))
5931+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5932 return addr;
5933 }
5934
5935@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939- if (!vma || addr <= vma->vm_start) {
5940+ if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948- addr = mm->mmap_base-len;
5949- if (do_color_align)
5950- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5951+ addr = mm->mmap_base - len;
5952
5953 do {
5954+ if (do_color_align)
5955+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 /*
5957 * Lookup failure means no vma is above this address,
5958 * else if new region fits below vma->vm_start,
5959 * return with success:
5960 */
5961 vma = find_vma(mm, addr);
5962- if (likely(!vma || addr+len <= vma->vm_start)) {
5963+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5964 /* remember the address as a hint for next time */
5965 return (mm->free_area_cache = addr);
5966 }
5967@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5968 mm->cached_hole_size = vma->vm_start - addr;
5969
5970 /* try just below the current vma->vm_start */
5971- addr = vma->vm_start-len;
5972- if (do_color_align)
5973- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5974- } while (likely(len < vma->vm_start));
5975+ addr = skip_heap_stack_gap(vma, len);
5976+ } while (!IS_ERR_VALUE(addr));
5977
5978 bottomup:
5979 /*
5980@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5981 gap == RLIM_INFINITY ||
5982 sysctl_legacy_va_layout) {
5983 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5984+
5985+#ifdef CONFIG_PAX_RANDMMAP
5986+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5987+ mm->mmap_base += mm->delta_mmap;
5988+#endif
5989+
5990 mm->get_unmapped_area = arch_get_unmapped_area;
5991 mm->unmap_area = arch_unmap_area;
5992 } else {
5993@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5994 gap = (task_size / 6 * 5);
5995
5996 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5997+
5998+#ifdef CONFIG_PAX_RANDMMAP
5999+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6000+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6001+#endif
6002+
6003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6004 mm->unmap_area = arch_unmap_area_topdown;
6005 }
6006diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6007index 1d7e274..b39c527 100644
6008--- a/arch/sparc/kernel/syscalls.S
6009+++ b/arch/sparc/kernel/syscalls.S
6010@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6011 #endif
6012 .align 32
6013 1: ldx [%g6 + TI_FLAGS], %l5
6014- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6015+ andcc %l5, _TIF_WORK_SYSCALL, %g0
6016 be,pt %icc, rtrap
6017 nop
6018 call syscall_trace_leave
6019@@ -179,7 +179,7 @@ linux_sparc_syscall32:
6020
6021 srl %i5, 0, %o5 ! IEU1
6022 srl %i2, 0, %o2 ! IEU0 Group
6023- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6024+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6025 bne,pn %icc, linux_syscall_trace32 ! CTI
6026 mov %i0, %l5 ! IEU1
6027 call %l7 ! CTI Group brk forced
6028@@ -202,7 +202,7 @@ linux_sparc_syscall:
6029
6030 mov %i3, %o3 ! IEU1
6031 mov %i4, %o4 ! IEU0 Group
6032- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6033+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6034 bne,pn %icc, linux_syscall_trace ! CTI Group
6035 mov %i0, %l5 ! IEU0
6036 2: call %l7 ! CTI Group brk forced
6037@@ -226,7 +226,7 @@ ret_sys_call:
6038
6039 cmp %o0, -ERESTART_RESTARTBLOCK
6040 bgeu,pn %xcc, 1f
6041- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6042+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6043 80:
6044 /* System call success, clear Carry condition code. */
6045 andn %g3, %g2, %g3
6046@@ -241,7 +241,7 @@ ret_sys_call:
6047 /* System call failure, set Carry condition code.
6048 * Also, get abs(errno) to return to the process.
6049 */
6050- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6051+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6052 sub %g0, %o0, %o0
6053 or %g3, %g2, %g3
6054 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6055diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6056index d2de213..6b22bc3 100644
6057--- a/arch/sparc/kernel/traps_32.c
6058+++ b/arch/sparc/kernel/traps_32.c
6059@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6060 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6061 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6062
6063+extern void gr_handle_kernel_exploit(void);
6064+
6065 void die_if_kernel(char *str, struct pt_regs *regs)
6066 {
6067 static int die_counter;
6068@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6069 count++ < 30 &&
6070 (((unsigned long) rw) >= PAGE_OFFSET) &&
6071 !(((unsigned long) rw) & 0x7)) {
6072- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6073+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6074 (void *) rw->ins[7]);
6075 rw = (struct reg_window32 *)rw->ins[6];
6076 }
6077 }
6078 printk("Instruction DUMP:");
6079 instruction_dump ((unsigned long *) regs->pc);
6080- if(regs->psr & PSR_PS)
6081+ if(regs->psr & PSR_PS) {
6082+ gr_handle_kernel_exploit();
6083 do_exit(SIGKILL);
6084+ }
6085 do_exit(SIGSEGV);
6086 }
6087
6088diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6089index c72fdf5..743a344 100644
6090--- a/arch/sparc/kernel/traps_64.c
6091+++ b/arch/sparc/kernel/traps_64.c
6092@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6093 i + 1,
6094 p->trapstack[i].tstate, p->trapstack[i].tpc,
6095 p->trapstack[i].tnpc, p->trapstack[i].tt);
6096- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6097+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6098 }
6099 }
6100
6101@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6102
6103 lvl -= 0x100;
6104 if (regs->tstate & TSTATE_PRIV) {
6105+
6106+#ifdef CONFIG_PAX_REFCOUNT
6107+ if (lvl == 6)
6108+ pax_report_refcount_overflow(regs);
6109+#endif
6110+
6111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6112 die_if_kernel(buffer, regs);
6113 }
6114@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6115 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6116 {
6117 char buffer[32];
6118-
6119+
6120 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6121 0, lvl, SIGTRAP) == NOTIFY_STOP)
6122 return;
6123
6124+#ifdef CONFIG_PAX_REFCOUNT
6125+ if (lvl == 6)
6126+ pax_report_refcount_overflow(regs);
6127+#endif
6128+
6129 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6130
6131 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6132@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6133 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6134 printk("%s" "ERROR(%d): ",
6135 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6136- printk("TPC<%pS>\n", (void *) regs->tpc);
6137+ printk("TPC<%pA>\n", (void *) regs->tpc);
6138 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6139 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6140 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6141@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6142 smp_processor_id(),
6143 (type & 0x1) ? 'I' : 'D',
6144 regs->tpc);
6145- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6146+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6147 panic("Irrecoverable Cheetah+ parity error.");
6148 }
6149
6150@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6151 smp_processor_id(),
6152 (type & 0x1) ? 'I' : 'D',
6153 regs->tpc);
6154- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6155+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6156 }
6157
6158 struct sun4v_error_entry {
6159@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6160
6161 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6162 regs->tpc, tl);
6163- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6164+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6165 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6166- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6167+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6168 (void *) regs->u_regs[UREG_I7]);
6169 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6170 "pte[%lx] error[%lx]\n",
6171@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6172
6173 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6174 regs->tpc, tl);
6175- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6176+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6177 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6178- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6179+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6180 (void *) regs->u_regs[UREG_I7]);
6181 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6182 "pte[%lx] error[%lx]\n",
6183@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6184 fp = (unsigned long)sf->fp + STACK_BIAS;
6185 }
6186
6187- printk(" [%016lx] %pS\n", pc, (void *) pc);
6188+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6189 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6190 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6191 int index = tsk->curr_ret_stack;
6192 if (tsk->ret_stack && index >= graph) {
6193 pc = tsk->ret_stack[index - graph].ret;
6194- printk(" [%016lx] %pS\n", pc, (void *) pc);
6195+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6196 graph++;
6197 }
6198 }
6199@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6200 return (struct reg_window *) (fp + STACK_BIAS);
6201 }
6202
6203+extern void gr_handle_kernel_exploit(void);
6204+
6205 void die_if_kernel(char *str, struct pt_regs *regs)
6206 {
6207 static int die_counter;
6208@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6209 while (rw &&
6210 count++ < 30 &&
6211 kstack_valid(tp, (unsigned long) rw)) {
6212- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6213+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6214 (void *) rw->ins[7]);
6215
6216 rw = kernel_stack_up(rw);
6217@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6218 }
6219 user_instruction_dump ((unsigned int __user *) regs->tpc);
6220 }
6221- if (regs->tstate & TSTATE_PRIV)
6222+ if (regs->tstate & TSTATE_PRIV) {
6223+ gr_handle_kernel_exploit();
6224 do_exit(SIGKILL);
6225+ }
6226 do_exit(SIGSEGV);
6227 }
6228 EXPORT_SYMBOL(die_if_kernel);
6229diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6230index dae85bc..af1e19d 100644
6231--- a/arch/sparc/kernel/unaligned_64.c
6232+++ b/arch/sparc/kernel/unaligned_64.c
6233@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6234 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6235
6236 if (__ratelimit(&ratelimit)) {
6237- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6238+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6239 regs->tpc, (void *) regs->tpc);
6240 }
6241 }
6242diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6243index a3fc437..fea9957 100644
6244--- a/arch/sparc/lib/Makefile
6245+++ b/arch/sparc/lib/Makefile
6246@@ -2,7 +2,7 @@
6247 #
6248
6249 asflags-y := -ansi -DST_DIV0=0x02
6250-ccflags-y := -Werror
6251+#ccflags-y := -Werror
6252
6253 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6254 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6255diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6256index 59186e0..f747d7a 100644
6257--- a/arch/sparc/lib/atomic_64.S
6258+++ b/arch/sparc/lib/atomic_64.S
6259@@ -18,7 +18,12 @@
6260 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6261 BACKOFF_SETUP(%o2)
6262 1: lduw [%o1], %g1
6263- add %g1, %o0, %g7
6264+ addcc %g1, %o0, %g7
6265+
6266+#ifdef CONFIG_PAX_REFCOUNT
6267+ tvs %icc, 6
6268+#endif
6269+
6270 cas [%o1], %g1, %g7
6271 cmp %g1, %g7
6272 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6273@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6274 2: BACKOFF_SPIN(%o2, %o3, 1b)
6275 .size atomic_add, .-atomic_add
6276
6277+ .globl atomic_add_unchecked
6278+ .type atomic_add_unchecked,#function
6279+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6280+ BACKOFF_SETUP(%o2)
6281+1: lduw [%o1], %g1
6282+ add %g1, %o0, %g7
6283+ cas [%o1], %g1, %g7
6284+ cmp %g1, %g7
6285+ bne,pn %icc, 2f
6286+ nop
6287+ retl
6288+ nop
6289+2: BACKOFF_SPIN(%o2, %o3, 1b)
6290+ .size atomic_add_unchecked, .-atomic_add_unchecked
6291+
6292 .globl atomic_sub
6293 .type atomic_sub,#function
6294 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6295 BACKOFF_SETUP(%o2)
6296 1: lduw [%o1], %g1
6297- sub %g1, %o0, %g7
6298+ subcc %g1, %o0, %g7
6299+
6300+#ifdef CONFIG_PAX_REFCOUNT
6301+ tvs %icc, 6
6302+#endif
6303+
6304 cas [%o1], %g1, %g7
6305 cmp %g1, %g7
6306 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6307@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6308 2: BACKOFF_SPIN(%o2, %o3, 1b)
6309 .size atomic_sub, .-atomic_sub
6310
6311+ .globl atomic_sub_unchecked
6312+ .type atomic_sub_unchecked,#function
6313+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6314+ BACKOFF_SETUP(%o2)
6315+1: lduw [%o1], %g1
6316+ sub %g1, %o0, %g7
6317+ cas [%o1], %g1, %g7
6318+ cmp %g1, %g7
6319+ bne,pn %icc, 2f
6320+ nop
6321+ retl
6322+ nop
6323+2: BACKOFF_SPIN(%o2, %o3, 1b)
6324+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6325+
6326 .globl atomic_add_ret
6327 .type atomic_add_ret,#function
6328 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6329 BACKOFF_SETUP(%o2)
6330 1: lduw [%o1], %g1
6331- add %g1, %o0, %g7
6332+ addcc %g1, %o0, %g7
6333+
6334+#ifdef CONFIG_PAX_REFCOUNT
6335+ tvs %icc, 6
6336+#endif
6337+
6338 cas [%o1], %g1, %g7
6339 cmp %g1, %g7
6340 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6341@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6342 2: BACKOFF_SPIN(%o2, %o3, 1b)
6343 .size atomic_add_ret, .-atomic_add_ret
6344
6345+ .globl atomic_add_ret_unchecked
6346+ .type atomic_add_ret_unchecked,#function
6347+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6348+ BACKOFF_SETUP(%o2)
6349+1: lduw [%o1], %g1
6350+ addcc %g1, %o0, %g7
6351+ cas [%o1], %g1, %g7
6352+ cmp %g1, %g7
6353+ bne,pn %icc, 2f
6354+ add %g7, %o0, %g7
6355+ sra %g7, 0, %o0
6356+ retl
6357+ nop
6358+2: BACKOFF_SPIN(%o2, %o3, 1b)
6359+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6360+
6361 .globl atomic_sub_ret
6362 .type atomic_sub_ret,#function
6363 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6364 BACKOFF_SETUP(%o2)
6365 1: lduw [%o1], %g1
6366- sub %g1, %o0, %g7
6367+ subcc %g1, %o0, %g7
6368+
6369+#ifdef CONFIG_PAX_REFCOUNT
6370+ tvs %icc, 6
6371+#endif
6372+
6373 cas [%o1], %g1, %g7
6374 cmp %g1, %g7
6375 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6376@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6377 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6378 BACKOFF_SETUP(%o2)
6379 1: ldx [%o1], %g1
6380- add %g1, %o0, %g7
6381+ addcc %g1, %o0, %g7
6382+
6383+#ifdef CONFIG_PAX_REFCOUNT
6384+ tvs %xcc, 6
6385+#endif
6386+
6387 casx [%o1], %g1, %g7
6388 cmp %g1, %g7
6389 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6390@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6391 2: BACKOFF_SPIN(%o2, %o3, 1b)
6392 .size atomic64_add, .-atomic64_add
6393
6394+ .globl atomic64_add_unchecked
6395+ .type atomic64_add_unchecked,#function
6396+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6397+ BACKOFF_SETUP(%o2)
6398+1: ldx [%o1], %g1
6399+ addcc %g1, %o0, %g7
6400+ casx [%o1], %g1, %g7
6401+ cmp %g1, %g7
6402+ bne,pn %xcc, 2f
6403+ nop
6404+ retl
6405+ nop
6406+2: BACKOFF_SPIN(%o2, %o3, 1b)
6407+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6408+
6409 .globl atomic64_sub
6410 .type atomic64_sub,#function
6411 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6412 BACKOFF_SETUP(%o2)
6413 1: ldx [%o1], %g1
6414- sub %g1, %o0, %g7
6415+ subcc %g1, %o0, %g7
6416+
6417+#ifdef CONFIG_PAX_REFCOUNT
6418+ tvs %xcc, 6
6419+#endif
6420+
6421 casx [%o1], %g1, %g7
6422 cmp %g1, %g7
6423 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6424@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6425 2: BACKOFF_SPIN(%o2, %o3, 1b)
6426 .size atomic64_sub, .-atomic64_sub
6427
6428+ .globl atomic64_sub_unchecked
6429+ .type atomic64_sub_unchecked,#function
6430+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6431+ BACKOFF_SETUP(%o2)
6432+1: ldx [%o1], %g1
6433+ subcc %g1, %o0, %g7
6434+ casx [%o1], %g1, %g7
6435+ cmp %g1, %g7
6436+ bne,pn %xcc, 2f
6437+ nop
6438+ retl
6439+ nop
6440+2: BACKOFF_SPIN(%o2, %o3, 1b)
6441+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6442+
6443 .globl atomic64_add_ret
6444 .type atomic64_add_ret,#function
6445 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6446 BACKOFF_SETUP(%o2)
6447 1: ldx [%o1], %g1
6448- add %g1, %o0, %g7
6449+ addcc %g1, %o0, %g7
6450+
6451+#ifdef CONFIG_PAX_REFCOUNT
6452+ tvs %xcc, 6
6453+#endif
6454+
6455 casx [%o1], %g1, %g7
6456 cmp %g1, %g7
6457 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6458@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6459 2: BACKOFF_SPIN(%o2, %o3, 1b)
6460 .size atomic64_add_ret, .-atomic64_add_ret
6461
6462+ .globl atomic64_add_ret_unchecked
6463+ .type atomic64_add_ret_unchecked,#function
6464+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6465+ BACKOFF_SETUP(%o2)
6466+1: ldx [%o1], %g1
6467+ addcc %g1, %o0, %g7
6468+ casx [%o1], %g1, %g7
6469+ cmp %g1, %g7
6470+ bne,pn %xcc, 2f
6471+ add %g7, %o0, %g7
6472+ mov %g7, %o0
6473+ retl
6474+ nop
6475+2: BACKOFF_SPIN(%o2, %o3, 1b)
6476+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6477+
6478 .globl atomic64_sub_ret
6479 .type atomic64_sub_ret,#function
6480 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: ldx [%o1], %g1
6483- sub %g1, %o0, %g7
6484+ subcc %g1, %o0, %g7
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ tvs %xcc, 6
6488+#endif
6489+
6490 casx [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6493diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6494index f73c224..662af10 100644
6495--- a/arch/sparc/lib/ksyms.c
6496+++ b/arch/sparc/lib/ksyms.c
6497@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6498
6499 /* Atomic counter implementation. */
6500 EXPORT_SYMBOL(atomic_add);
6501+EXPORT_SYMBOL(atomic_add_unchecked);
6502 EXPORT_SYMBOL(atomic_add_ret);
6503+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6504 EXPORT_SYMBOL(atomic_sub);
6505+EXPORT_SYMBOL(atomic_sub_unchecked);
6506 EXPORT_SYMBOL(atomic_sub_ret);
6507 EXPORT_SYMBOL(atomic64_add);
6508+EXPORT_SYMBOL(atomic64_add_unchecked);
6509 EXPORT_SYMBOL(atomic64_add_ret);
6510+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6511 EXPORT_SYMBOL(atomic64_sub);
6512+EXPORT_SYMBOL(atomic64_sub_unchecked);
6513 EXPORT_SYMBOL(atomic64_sub_ret);
6514
6515 /* Atomic bit operations. */
6516diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6517index 301421c..e2535d1 100644
6518--- a/arch/sparc/mm/Makefile
6519+++ b/arch/sparc/mm/Makefile
6520@@ -2,7 +2,7 @@
6521 #
6522
6523 asflags-y := -ansi
6524-ccflags-y := -Werror
6525+#ccflags-y := -Werror
6526
6527 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6528 obj-y += fault_$(BITS).o
6529diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6530index df3155a..eb708b8 100644
6531--- a/arch/sparc/mm/fault_32.c
6532+++ b/arch/sparc/mm/fault_32.c
6533@@ -21,6 +21,9 @@
6534 #include <linux/perf_event.h>
6535 #include <linux/interrupt.h>
6536 #include <linux/kdebug.h>
6537+#include <linux/slab.h>
6538+#include <linux/pagemap.h>
6539+#include <linux/compiler.h>
6540
6541 #include <asm/page.h>
6542 #include <asm/pgtable.h>
6543@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6544 return safe_compute_effective_address(regs, insn);
6545 }
6546
6547+#ifdef CONFIG_PAX_PAGEEXEC
6548+#ifdef CONFIG_PAX_DLRESOLVE
6549+static void pax_emuplt_close(struct vm_area_struct *vma)
6550+{
6551+ vma->vm_mm->call_dl_resolve = 0UL;
6552+}
6553+
6554+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6555+{
6556+ unsigned int *kaddr;
6557+
6558+ vmf->page = alloc_page(GFP_HIGHUSER);
6559+ if (!vmf->page)
6560+ return VM_FAULT_OOM;
6561+
6562+ kaddr = kmap(vmf->page);
6563+ memset(kaddr, 0, PAGE_SIZE);
6564+ kaddr[0] = 0x9DE3BFA8U; /* save */
6565+ flush_dcache_page(vmf->page);
6566+ kunmap(vmf->page);
6567+ return VM_FAULT_MAJOR;
6568+}
6569+
6570+static const struct vm_operations_struct pax_vm_ops = {
6571+ .close = pax_emuplt_close,
6572+ .fault = pax_emuplt_fault
6573+};
6574+
6575+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6576+{
6577+ int ret;
6578+
6579+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6580+ vma->vm_mm = current->mm;
6581+ vma->vm_start = addr;
6582+ vma->vm_end = addr + PAGE_SIZE;
6583+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6584+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6585+ vma->vm_ops = &pax_vm_ops;
6586+
6587+ ret = insert_vm_struct(current->mm, vma);
6588+ if (ret)
6589+ return ret;
6590+
6591+ ++current->mm->total_vm;
6592+ return 0;
6593+}
6594+#endif
6595+
6596+/*
6597+ * PaX: decide what to do with offenders (regs->pc = fault address)
6598+ *
6599+ * returns 1 when task should be killed
6600+ * 2 when patched PLT trampoline was detected
6601+ * 3 when unpatched PLT trampoline was detected
6602+ */
6603+static int pax_handle_fetch_fault(struct pt_regs *regs)
6604+{
6605+
6606+#ifdef CONFIG_PAX_EMUPLT
6607+ int err;
6608+
6609+ do { /* PaX: patched PLT emulation #1 */
6610+ unsigned int sethi1, sethi2, jmpl;
6611+
6612+ err = get_user(sethi1, (unsigned int *)regs->pc);
6613+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6614+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6615+
6616+ if (err)
6617+ break;
6618+
6619+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6620+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6621+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6622+ {
6623+ unsigned int addr;
6624+
6625+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6626+ addr = regs->u_regs[UREG_G1];
6627+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6628+ regs->pc = addr;
6629+ regs->npc = addr+4;
6630+ return 2;
6631+ }
6632+ } while (0);
6633+
6634+ { /* PaX: patched PLT emulation #2 */
6635+ unsigned int ba;
6636+
6637+ err = get_user(ba, (unsigned int *)regs->pc);
6638+
6639+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6640+ unsigned int addr;
6641+
6642+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6643+ regs->pc = addr;
6644+ regs->npc = addr+4;
6645+ return 2;
6646+ }
6647+ }
6648+
6649+ do { /* PaX: patched PLT emulation #3 */
6650+ unsigned int sethi, jmpl, nop;
6651+
6652+ err = get_user(sethi, (unsigned int *)regs->pc);
6653+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6654+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6655+
6656+ if (err)
6657+ break;
6658+
6659+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6660+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6661+ nop == 0x01000000U)
6662+ {
6663+ unsigned int addr;
6664+
6665+ addr = (sethi & 0x003FFFFFU) << 10;
6666+ regs->u_regs[UREG_G1] = addr;
6667+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6668+ regs->pc = addr;
6669+ regs->npc = addr+4;
6670+ return 2;
6671+ }
6672+ } while (0);
6673+
6674+ do { /* PaX: unpatched PLT emulation step 1 */
6675+ unsigned int sethi, ba, nop;
6676+
6677+ err = get_user(sethi, (unsigned int *)regs->pc);
6678+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6679+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6680+
6681+ if (err)
6682+ break;
6683+
6684+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6685+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6686+ nop == 0x01000000U)
6687+ {
6688+ unsigned int addr, save, call;
6689+
6690+ if ((ba & 0xFFC00000U) == 0x30800000U)
6691+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6692+ else
6693+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6694+
6695+ err = get_user(save, (unsigned int *)addr);
6696+ err |= get_user(call, (unsigned int *)(addr+4));
6697+ err |= get_user(nop, (unsigned int *)(addr+8));
6698+ if (err)
6699+ break;
6700+
6701+#ifdef CONFIG_PAX_DLRESOLVE
6702+ if (save == 0x9DE3BFA8U &&
6703+ (call & 0xC0000000U) == 0x40000000U &&
6704+ nop == 0x01000000U)
6705+ {
6706+ struct vm_area_struct *vma;
6707+ unsigned long call_dl_resolve;
6708+
6709+ down_read(&current->mm->mmap_sem);
6710+ call_dl_resolve = current->mm->call_dl_resolve;
6711+ up_read(&current->mm->mmap_sem);
6712+ if (likely(call_dl_resolve))
6713+ goto emulate;
6714+
6715+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6716+
6717+ down_write(&current->mm->mmap_sem);
6718+ if (current->mm->call_dl_resolve) {
6719+ call_dl_resolve = current->mm->call_dl_resolve;
6720+ up_write(&current->mm->mmap_sem);
6721+ if (vma)
6722+ kmem_cache_free(vm_area_cachep, vma);
6723+ goto emulate;
6724+ }
6725+
6726+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6727+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6728+ up_write(&current->mm->mmap_sem);
6729+ if (vma)
6730+ kmem_cache_free(vm_area_cachep, vma);
6731+ return 1;
6732+ }
6733+
6734+ if (pax_insert_vma(vma, call_dl_resolve)) {
6735+ up_write(&current->mm->mmap_sem);
6736+ kmem_cache_free(vm_area_cachep, vma);
6737+ return 1;
6738+ }
6739+
6740+ current->mm->call_dl_resolve = call_dl_resolve;
6741+ up_write(&current->mm->mmap_sem);
6742+
6743+emulate:
6744+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6745+ regs->pc = call_dl_resolve;
6746+ regs->npc = addr+4;
6747+ return 3;
6748+ }
6749+#endif
6750+
6751+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6752+ if ((save & 0xFFC00000U) == 0x05000000U &&
6753+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6754+ nop == 0x01000000U)
6755+ {
6756+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6757+ regs->u_regs[UREG_G2] = addr + 4;
6758+ addr = (save & 0x003FFFFFU) << 10;
6759+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6760+ regs->pc = addr;
6761+ regs->npc = addr+4;
6762+ return 3;
6763+ }
6764+ }
6765+ } while (0);
6766+
6767+ do { /* PaX: unpatched PLT emulation step 2 */
6768+ unsigned int save, call, nop;
6769+
6770+ err = get_user(save, (unsigned int *)(regs->pc-4));
6771+ err |= get_user(call, (unsigned int *)regs->pc);
6772+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6773+ if (err)
6774+ break;
6775+
6776+ if (save == 0x9DE3BFA8U &&
6777+ (call & 0xC0000000U) == 0x40000000U &&
6778+ nop == 0x01000000U)
6779+ {
6780+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6781+
6782+ regs->u_regs[UREG_RETPC] = regs->pc;
6783+ regs->pc = dl_resolve;
6784+ regs->npc = dl_resolve+4;
6785+ return 3;
6786+ }
6787+ } while (0);
6788+#endif
6789+
6790+ return 1;
6791+}
6792+
6793+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6794+{
6795+ unsigned long i;
6796+
6797+ printk(KERN_ERR "PAX: bytes at PC: ");
6798+ for (i = 0; i < 8; i++) {
6799+ unsigned int c;
6800+ if (get_user(c, (unsigned int *)pc+i))
6801+ printk(KERN_CONT "???????? ");
6802+ else
6803+ printk(KERN_CONT "%08x ", c);
6804+ }
6805+ printk("\n");
6806+}
6807+#endif
6808+
6809 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6810 int text_fault)
6811 {
6812@@ -282,6 +547,24 @@ good_area:
6813 if(!(vma->vm_flags & VM_WRITE))
6814 goto bad_area;
6815 } else {
6816+
6817+#ifdef CONFIG_PAX_PAGEEXEC
6818+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6819+ up_read(&mm->mmap_sem);
6820+ switch (pax_handle_fetch_fault(regs)) {
6821+
6822+#ifdef CONFIG_PAX_EMUPLT
6823+ case 2:
6824+ case 3:
6825+ return;
6826+#endif
6827+
6828+ }
6829+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6830+ do_group_exit(SIGKILL);
6831+ }
6832+#endif
6833+
6834 /* Allow reads even for write-only mappings */
6835 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6836 goto bad_area;
6837diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6838index 1fe0429..aee2e87 100644
6839--- a/arch/sparc/mm/fault_64.c
6840+++ b/arch/sparc/mm/fault_64.c
6841@@ -21,6 +21,9 @@
6842 #include <linux/kprobes.h>
6843 #include <linux/kdebug.h>
6844 #include <linux/percpu.h>
6845+#include <linux/slab.h>
6846+#include <linux/pagemap.h>
6847+#include <linux/compiler.h>
6848
6849 #include <asm/page.h>
6850 #include <asm/pgtable.h>
6851@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6852 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6853 regs->tpc);
6854 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6855- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6856+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6857 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6858 dump_stack();
6859 unhandled_fault(regs->tpc, current, regs);
6860@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6861 show_regs(regs);
6862 }
6863
6864+#ifdef CONFIG_PAX_PAGEEXEC
6865+#ifdef CONFIG_PAX_DLRESOLVE
6866+static void pax_emuplt_close(struct vm_area_struct *vma)
6867+{
6868+ vma->vm_mm->call_dl_resolve = 0UL;
6869+}
6870+
6871+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6872+{
6873+ unsigned int *kaddr;
6874+
6875+ vmf->page = alloc_page(GFP_HIGHUSER);
6876+ if (!vmf->page)
6877+ return VM_FAULT_OOM;
6878+
6879+ kaddr = kmap(vmf->page);
6880+ memset(kaddr, 0, PAGE_SIZE);
6881+ kaddr[0] = 0x9DE3BFA8U; /* save */
6882+ flush_dcache_page(vmf->page);
6883+ kunmap(vmf->page);
6884+ return VM_FAULT_MAJOR;
6885+}
6886+
6887+static const struct vm_operations_struct pax_vm_ops = {
6888+ .close = pax_emuplt_close,
6889+ .fault = pax_emuplt_fault
6890+};
6891+
6892+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6893+{
6894+ int ret;
6895+
6896+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6897+ vma->vm_mm = current->mm;
6898+ vma->vm_start = addr;
6899+ vma->vm_end = addr + PAGE_SIZE;
6900+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6901+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6902+ vma->vm_ops = &pax_vm_ops;
6903+
6904+ ret = insert_vm_struct(current->mm, vma);
6905+ if (ret)
6906+ return ret;
6907+
6908+ ++current->mm->total_vm;
6909+ return 0;
6910+}
6911+#endif
6912+
6913+/*
6914+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6915+ *
6916+ * returns 1 when task should be killed
6917+ * 2 when patched PLT trampoline was detected
6918+ * 3 when unpatched PLT trampoline was detected
6919+ */
6920+static int pax_handle_fetch_fault(struct pt_regs *regs)
6921+{
6922+
6923+#ifdef CONFIG_PAX_EMUPLT
6924+ int err;
6925+
6926+ do { /* PaX: patched PLT emulation #1 */
6927+ unsigned int sethi1, sethi2, jmpl;
6928+
6929+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6930+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6931+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6932+
6933+ if (err)
6934+ break;
6935+
6936+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6937+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6938+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6939+ {
6940+ unsigned long addr;
6941+
6942+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6943+ addr = regs->u_regs[UREG_G1];
6944+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6945+
6946+ if (test_thread_flag(TIF_32BIT))
6947+ addr &= 0xFFFFFFFFUL;
6948+
6949+ regs->tpc = addr;
6950+ regs->tnpc = addr+4;
6951+ return 2;
6952+ }
6953+ } while (0);
6954+
6955+ { /* PaX: patched PLT emulation #2 */
6956+ unsigned int ba;
6957+
6958+ err = get_user(ba, (unsigned int *)regs->tpc);
6959+
6960+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961+ unsigned long addr;
6962+
6963+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6964+
6965+ if (test_thread_flag(TIF_32BIT))
6966+ addr &= 0xFFFFFFFFUL;
6967+
6968+ regs->tpc = addr;
6969+ regs->tnpc = addr+4;
6970+ return 2;
6971+ }
6972+ }
6973+
6974+ do { /* PaX: patched PLT emulation #3 */
6975+ unsigned int sethi, jmpl, nop;
6976+
6977+ err = get_user(sethi, (unsigned int *)regs->tpc);
6978+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6979+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6980+
6981+ if (err)
6982+ break;
6983+
6984+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6985+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6986+ nop == 0x01000000U)
6987+ {
6988+ unsigned long addr;
6989+
6990+ addr = (sethi & 0x003FFFFFU) << 10;
6991+ regs->u_regs[UREG_G1] = addr;
6992+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6993+
6994+ if (test_thread_flag(TIF_32BIT))
6995+ addr &= 0xFFFFFFFFUL;
6996+
6997+ regs->tpc = addr;
6998+ regs->tnpc = addr+4;
6999+ return 2;
7000+ }
7001+ } while (0);
7002+
7003+ do { /* PaX: patched PLT emulation #4 */
7004+ unsigned int sethi, mov1, call, mov2;
7005+
7006+ err = get_user(sethi, (unsigned int *)regs->tpc);
7007+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7008+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7009+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7010+
7011+ if (err)
7012+ break;
7013+
7014+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7015+ mov1 == 0x8210000FU &&
7016+ (call & 0xC0000000U) == 0x40000000U &&
7017+ mov2 == 0x9E100001U)
7018+ {
7019+ unsigned long addr;
7020+
7021+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7022+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7023+
7024+ if (test_thread_flag(TIF_32BIT))
7025+ addr &= 0xFFFFFFFFUL;
7026+
7027+ regs->tpc = addr;
7028+ regs->tnpc = addr+4;
7029+ return 2;
7030+ }
7031+ } while (0);
7032+
7033+ do { /* PaX: patched PLT emulation #5 */
7034+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7035+
7036+ err = get_user(sethi, (unsigned int *)regs->tpc);
7037+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7038+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7039+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7040+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7041+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7042+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7043+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7044+
7045+ if (err)
7046+ break;
7047+
7048+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7049+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7050+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7051+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7052+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7053+ sllx == 0x83287020U &&
7054+ jmpl == 0x81C04005U &&
7055+ nop == 0x01000000U)
7056+ {
7057+ unsigned long addr;
7058+
7059+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7060+ regs->u_regs[UREG_G1] <<= 32;
7061+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7062+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7063+ regs->tpc = addr;
7064+ regs->tnpc = addr+4;
7065+ return 2;
7066+ }
7067+ } while (0);
7068+
7069+ do { /* PaX: patched PLT emulation #6 */
7070+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7071+
7072+ err = get_user(sethi, (unsigned int *)regs->tpc);
7073+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7074+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7075+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7076+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7077+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7078+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7079+
7080+ if (err)
7081+ break;
7082+
7083+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7084+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7085+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7086+ sllx == 0x83287020U &&
7087+ (or & 0xFFFFE000U) == 0x8A116000U &&
7088+ jmpl == 0x81C04005U &&
7089+ nop == 0x01000000U)
7090+ {
7091+ unsigned long addr;
7092+
7093+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7094+ regs->u_regs[UREG_G1] <<= 32;
7095+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7096+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7097+ regs->tpc = addr;
7098+ regs->tnpc = addr+4;
7099+ return 2;
7100+ }
7101+ } while (0);
7102+
7103+ do { /* PaX: unpatched PLT emulation step 1 */
7104+ unsigned int sethi, ba, nop;
7105+
7106+ err = get_user(sethi, (unsigned int *)regs->tpc);
7107+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7108+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7109+
7110+ if (err)
7111+ break;
7112+
7113+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7114+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7115+ nop == 0x01000000U)
7116+ {
7117+ unsigned long addr;
7118+ unsigned int save, call;
7119+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7120+
7121+ if ((ba & 0xFFC00000U) == 0x30800000U)
7122+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7123+ else
7124+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7125+
7126+ if (test_thread_flag(TIF_32BIT))
7127+ addr &= 0xFFFFFFFFUL;
7128+
7129+ err = get_user(save, (unsigned int *)addr);
7130+ err |= get_user(call, (unsigned int *)(addr+4));
7131+ err |= get_user(nop, (unsigned int *)(addr+8));
7132+ if (err)
7133+ break;
7134+
7135+#ifdef CONFIG_PAX_DLRESOLVE
7136+ if (save == 0x9DE3BFA8U &&
7137+ (call & 0xC0000000U) == 0x40000000U &&
7138+ nop == 0x01000000U)
7139+ {
7140+ struct vm_area_struct *vma;
7141+ unsigned long call_dl_resolve;
7142+
7143+ down_read(&current->mm->mmap_sem);
7144+ call_dl_resolve = current->mm->call_dl_resolve;
7145+ up_read(&current->mm->mmap_sem);
7146+ if (likely(call_dl_resolve))
7147+ goto emulate;
7148+
7149+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7150+
7151+ down_write(&current->mm->mmap_sem);
7152+ if (current->mm->call_dl_resolve) {
7153+ call_dl_resolve = current->mm->call_dl_resolve;
7154+ up_write(&current->mm->mmap_sem);
7155+ if (vma)
7156+ kmem_cache_free(vm_area_cachep, vma);
7157+ goto emulate;
7158+ }
7159+
7160+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7161+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7162+ up_write(&current->mm->mmap_sem);
7163+ if (vma)
7164+ kmem_cache_free(vm_area_cachep, vma);
7165+ return 1;
7166+ }
7167+
7168+ if (pax_insert_vma(vma, call_dl_resolve)) {
7169+ up_write(&current->mm->mmap_sem);
7170+ kmem_cache_free(vm_area_cachep, vma);
7171+ return 1;
7172+ }
7173+
7174+ current->mm->call_dl_resolve = call_dl_resolve;
7175+ up_write(&current->mm->mmap_sem);
7176+
7177+emulate:
7178+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7179+ regs->tpc = call_dl_resolve;
7180+ regs->tnpc = addr+4;
7181+ return 3;
7182+ }
7183+#endif
7184+
7185+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7186+ if ((save & 0xFFC00000U) == 0x05000000U &&
7187+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7188+ nop == 0x01000000U)
7189+ {
7190+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7191+ regs->u_regs[UREG_G2] = addr + 4;
7192+ addr = (save & 0x003FFFFFU) << 10;
7193+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7194+
7195+ if (test_thread_flag(TIF_32BIT))
7196+ addr &= 0xFFFFFFFFUL;
7197+
7198+ regs->tpc = addr;
7199+ regs->tnpc = addr+4;
7200+ return 3;
7201+ }
7202+
7203+ /* PaX: 64-bit PLT stub */
7204+ err = get_user(sethi1, (unsigned int *)addr);
7205+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7206+ err |= get_user(or1, (unsigned int *)(addr+8));
7207+ err |= get_user(or2, (unsigned int *)(addr+12));
7208+ err |= get_user(sllx, (unsigned int *)(addr+16));
7209+ err |= get_user(add, (unsigned int *)(addr+20));
7210+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7211+ err |= get_user(nop, (unsigned int *)(addr+28));
7212+ if (err)
7213+ break;
7214+
7215+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7216+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7217+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7218+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7219+ sllx == 0x89293020U &&
7220+ add == 0x8A010005U &&
7221+ jmpl == 0x89C14000U &&
7222+ nop == 0x01000000U)
7223+ {
7224+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7225+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7226+ regs->u_regs[UREG_G4] <<= 32;
7227+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7228+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7229+ regs->u_regs[UREG_G4] = addr + 24;
7230+ addr = regs->u_regs[UREG_G5];
7231+ regs->tpc = addr;
7232+ regs->tnpc = addr+4;
7233+ return 3;
7234+ }
7235+ }
7236+ } while (0);
7237+
7238+#ifdef CONFIG_PAX_DLRESOLVE
7239+ do { /* PaX: unpatched PLT emulation step 2 */
7240+ unsigned int save, call, nop;
7241+
7242+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7243+ err |= get_user(call, (unsigned int *)regs->tpc);
7244+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7245+ if (err)
7246+ break;
7247+
7248+ if (save == 0x9DE3BFA8U &&
7249+ (call & 0xC0000000U) == 0x40000000U &&
7250+ nop == 0x01000000U)
7251+ {
7252+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7253+
7254+ if (test_thread_flag(TIF_32BIT))
7255+ dl_resolve &= 0xFFFFFFFFUL;
7256+
7257+ regs->u_regs[UREG_RETPC] = regs->tpc;
7258+ regs->tpc = dl_resolve;
7259+ regs->tnpc = dl_resolve+4;
7260+ return 3;
7261+ }
7262+ } while (0);
7263+#endif
7264+
7265+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7266+ unsigned int sethi, ba, nop;
7267+
7268+ err = get_user(sethi, (unsigned int *)regs->tpc);
7269+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7270+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7271+
7272+ if (err)
7273+ break;
7274+
7275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7276+ (ba & 0xFFF00000U) == 0x30600000U &&
7277+ nop == 0x01000000U)
7278+ {
7279+ unsigned long addr;
7280+
7281+ addr = (sethi & 0x003FFFFFU) << 10;
7282+ regs->u_regs[UREG_G1] = addr;
7283+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7284+
7285+ if (test_thread_flag(TIF_32BIT))
7286+ addr &= 0xFFFFFFFFUL;
7287+
7288+ regs->tpc = addr;
7289+ regs->tnpc = addr+4;
7290+ return 2;
7291+ }
7292+ } while (0);
7293+
7294+#endif
7295+
7296+ return 1;
7297+}
7298+
7299+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7300+{
7301+ unsigned long i;
7302+
7303+ printk(KERN_ERR "PAX: bytes at PC: ");
7304+ for (i = 0; i < 8; i++) {
7305+ unsigned int c;
7306+ if (get_user(c, (unsigned int *)pc+i))
7307+ printk(KERN_CONT "???????? ");
7308+ else
7309+ printk(KERN_CONT "%08x ", c);
7310+ }
7311+ printk("\n");
7312+}
7313+#endif
7314+
7315 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7316 {
7317 struct mm_struct *mm = current->mm;
7318@@ -343,6 +797,29 @@ retry:
7319 if (!vma)
7320 goto bad_area;
7321
7322+#ifdef CONFIG_PAX_PAGEEXEC
7323+ /* PaX: detect ITLB misses on non-exec pages */
7324+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7325+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7326+ {
7327+ if (address != regs->tpc)
7328+ goto good_area;
7329+
7330+ up_read(&mm->mmap_sem);
7331+ switch (pax_handle_fetch_fault(regs)) {
7332+
7333+#ifdef CONFIG_PAX_EMUPLT
7334+ case 2:
7335+ case 3:
7336+ return;
7337+#endif
7338+
7339+ }
7340+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7341+ do_group_exit(SIGKILL);
7342+ }
7343+#endif
7344+
7345 /* Pure DTLB misses do not tell us whether the fault causing
7346 * load/store/atomic was a write or not, it only says that there
7347 * was no match. So in such a case we (carefully) read the
7348diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7349index 07e1453..0a7d9e9 100644
7350--- a/arch/sparc/mm/hugetlbpage.c
7351+++ b/arch/sparc/mm/hugetlbpage.c
7352@@ -67,7 +67,7 @@ full_search:
7353 }
7354 return -ENOMEM;
7355 }
7356- if (likely(!vma || addr + len <= vma->vm_start)) {
7357+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7358 /*
7359 * Remember the place where we stopped the search:
7360 */
7361@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7362 /* make sure it can fit in the remaining address space */
7363 if (likely(addr > len)) {
7364 vma = find_vma(mm, addr-len);
7365- if (!vma || addr <= vma->vm_start) {
7366+ if (check_heap_stack_gap(vma, addr - len, len)) {
7367 /* remember the address as a hint for next time */
7368 return (mm->free_area_cache = addr-len);
7369 }
7370@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7371 if (unlikely(mm->mmap_base < len))
7372 goto bottomup;
7373
7374- addr = (mm->mmap_base-len) & HPAGE_MASK;
7375+ addr = mm->mmap_base - len;
7376
7377 do {
7378+ addr &= HPAGE_MASK;
7379 /*
7380 * Lookup failure means no vma is above this address,
7381 * else if new region fits below vma->vm_start,
7382 * return with success:
7383 */
7384 vma = find_vma(mm, addr);
7385- if (likely(!vma || addr+len <= vma->vm_start)) {
7386+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7387 /* remember the address as a hint for next time */
7388 return (mm->free_area_cache = addr);
7389 }
7390@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7391 mm->cached_hole_size = vma->vm_start - addr;
7392
7393 /* try just below the current vma->vm_start */
7394- addr = (vma->vm_start-len) & HPAGE_MASK;
7395- } while (likely(len < vma->vm_start));
7396+ addr = skip_heap_stack_gap(vma, len);
7397+ } while (!IS_ERR_VALUE(addr));
7398
7399 bottomup:
7400 /*
7401@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7402 if (addr) {
7403 addr = ALIGN(addr, HPAGE_SIZE);
7404 vma = find_vma(mm, addr);
7405- if (task_size - len >= addr &&
7406- (!vma || addr + len <= vma->vm_start))
7407+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7408 return addr;
7409 }
7410 if (mm->get_unmapped_area == arch_get_unmapped_area)
7411diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7412index c5f9021..7591bae 100644
7413--- a/arch/sparc/mm/init_32.c
7414+++ b/arch/sparc/mm/init_32.c
7415@@ -315,6 +315,9 @@ extern void device_scan(void);
7416 pgprot_t PAGE_SHARED __read_mostly;
7417 EXPORT_SYMBOL(PAGE_SHARED);
7418
7419+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7420+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7421+
7422 void __init paging_init(void)
7423 {
7424 switch(sparc_cpu_model) {
7425@@ -343,17 +346,17 @@ void __init paging_init(void)
7426
7427 /* Initialize the protection map with non-constant, MMU dependent values. */
7428 protection_map[0] = PAGE_NONE;
7429- protection_map[1] = PAGE_READONLY;
7430- protection_map[2] = PAGE_COPY;
7431- protection_map[3] = PAGE_COPY;
7432+ protection_map[1] = PAGE_READONLY_NOEXEC;
7433+ protection_map[2] = PAGE_COPY_NOEXEC;
7434+ protection_map[3] = PAGE_COPY_NOEXEC;
7435 protection_map[4] = PAGE_READONLY;
7436 protection_map[5] = PAGE_READONLY;
7437 protection_map[6] = PAGE_COPY;
7438 protection_map[7] = PAGE_COPY;
7439 protection_map[8] = PAGE_NONE;
7440- protection_map[9] = PAGE_READONLY;
7441- protection_map[10] = PAGE_SHARED;
7442- protection_map[11] = PAGE_SHARED;
7443+ protection_map[9] = PAGE_READONLY_NOEXEC;
7444+ protection_map[10] = PAGE_SHARED_NOEXEC;
7445+ protection_map[11] = PAGE_SHARED_NOEXEC;
7446 protection_map[12] = PAGE_READONLY;
7447 protection_map[13] = PAGE_READONLY;
7448 protection_map[14] = PAGE_SHARED;
7449diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7450index cbef74e..c38fead 100644
7451--- a/arch/sparc/mm/srmmu.c
7452+++ b/arch/sparc/mm/srmmu.c
7453@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7454 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7455 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7456 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7457+
7458+#ifdef CONFIG_PAX_PAGEEXEC
7459+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7460+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7461+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7462+#endif
7463+
7464 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7465 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7466
7467diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7468index f4500c6..889656c 100644
7469--- a/arch/tile/include/asm/atomic_64.h
7470+++ b/arch/tile/include/asm/atomic_64.h
7471@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7472
7473 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7474
7475+#define atomic64_read_unchecked(v) atomic64_read(v)
7476+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7477+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7478+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7479+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7480+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7481+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7482+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7483+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7484+
7485 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7486 #define smp_mb__before_atomic_dec() smp_mb()
7487 #define smp_mb__after_atomic_dec() smp_mb()
7488diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7489index 392e533..536b092 100644
7490--- a/arch/tile/include/asm/cache.h
7491+++ b/arch/tile/include/asm/cache.h
7492@@ -15,11 +15,12 @@
7493 #ifndef _ASM_TILE_CACHE_H
7494 #define _ASM_TILE_CACHE_H
7495
7496+#include <linux/const.h>
7497 #include <arch/chip.h>
7498
7499 /* bytes per L1 data cache line */
7500 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7503
7504 /* bytes per L2 cache line */
7505 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7506diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7507index ef34d2c..d6ce60c 100644
7508--- a/arch/tile/include/asm/uaccess.h
7509+++ b/arch/tile/include/asm/uaccess.h
7510@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7511 const void __user *from,
7512 unsigned long n)
7513 {
7514- int sz = __compiletime_object_size(to);
7515+ size_t sz = __compiletime_object_size(to);
7516
7517- if (likely(sz == -1 || sz >= n))
7518+ if (likely(sz == (size_t)-1 || sz >= n))
7519 n = _copy_from_user(to, from, n);
7520 else
7521 copy_from_user_overflow();
7522diff --git a/arch/um/Makefile b/arch/um/Makefile
7523index 55c0661..86ad413 100644
7524--- a/arch/um/Makefile
7525+++ b/arch/um/Makefile
7526@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7527 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7528 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7529
7530+ifdef CONSTIFY_PLUGIN
7531+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7532+endif
7533+
7534 #This will adjust *FLAGS accordingly to the platform.
7535 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7536
7537diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7538index 19e1bdd..3665b77 100644
7539--- a/arch/um/include/asm/cache.h
7540+++ b/arch/um/include/asm/cache.h
7541@@ -1,6 +1,7 @@
7542 #ifndef __UM_CACHE_H
7543 #define __UM_CACHE_H
7544
7545+#include <linux/const.h>
7546
7547 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7548 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7549@@ -12,6 +13,6 @@
7550 # define L1_CACHE_SHIFT 5
7551 #endif
7552
7553-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7554+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7555
7556 #endif
7557diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7558index 6c03acd..a5e0215 100644
7559--- a/arch/um/include/asm/kmap_types.h
7560+++ b/arch/um/include/asm/kmap_types.h
7561@@ -23,6 +23,7 @@ enum km_type {
7562 KM_IRQ1,
7563 KM_SOFTIRQ0,
7564 KM_SOFTIRQ1,
7565+ KM_CLEARPAGE,
7566 KM_TYPE_NR
7567 };
7568
7569diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7570index 7cfc3ce..cbd1a58 100644
7571--- a/arch/um/include/asm/page.h
7572+++ b/arch/um/include/asm/page.h
7573@@ -14,6 +14,9 @@
7574 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7575 #define PAGE_MASK (~(PAGE_SIZE-1))
7576
7577+#define ktla_ktva(addr) (addr)
7578+#define ktva_ktla(addr) (addr)
7579+
7580 #ifndef __ASSEMBLY__
7581
7582 struct page;
7583diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7584index 0032f92..cd151e0 100644
7585--- a/arch/um/include/asm/pgtable-3level.h
7586+++ b/arch/um/include/asm/pgtable-3level.h
7587@@ -58,6 +58,7 @@
7588 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7589 #define pud_populate(mm, pud, pmd) \
7590 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7591+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7592
7593 #ifdef CONFIG_64BIT
7594 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7595diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7596index 2b73ded..804f540 100644
7597--- a/arch/um/kernel/process.c
7598+++ b/arch/um/kernel/process.c
7599@@ -404,22 +404,6 @@ int singlestepping(void * t)
7600 return 2;
7601 }
7602
7603-/*
7604- * Only x86 and x86_64 have an arch_align_stack().
7605- * All other arches have "#define arch_align_stack(x) (x)"
7606- * in their asm/system.h
7607- * As this is included in UML from asm-um/system-generic.h,
7608- * we can use it to behave as the subarch does.
7609- */
7610-#ifndef arch_align_stack
7611-unsigned long arch_align_stack(unsigned long sp)
7612-{
7613- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7614- sp -= get_random_int() % 8192;
7615- return sp & ~0xf;
7616-}
7617-#endif
7618-
7619 unsigned long get_wchan(struct task_struct *p)
7620 {
7621 unsigned long stack_page, sp, ip;
7622diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7623index ad8f795..2c7eec6 100644
7624--- a/arch/unicore32/include/asm/cache.h
7625+++ b/arch/unicore32/include/asm/cache.h
7626@@ -12,8 +12,10 @@
7627 #ifndef __UNICORE_CACHE_H__
7628 #define __UNICORE_CACHE_H__
7629
7630-#define L1_CACHE_SHIFT (5)
7631-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7632+#include <linux/const.h>
7633+
7634+#define L1_CACHE_SHIFT 5
7635+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7636
7637 /*
7638 * Memory returned by kmalloc() may be used for DMA, so we must make
7639diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7640index c9866b0..fe53aef 100644
7641--- a/arch/x86/Kconfig
7642+++ b/arch/x86/Kconfig
7643@@ -229,7 +229,7 @@ config X86_HT
7644
7645 config X86_32_LAZY_GS
7646 def_bool y
7647- depends on X86_32 && !CC_STACKPROTECTOR
7648+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7649
7650 config ARCH_HWEIGHT_CFLAGS
7651 string
7652@@ -1042,7 +1042,7 @@ choice
7653
7654 config NOHIGHMEM
7655 bool "off"
7656- depends on !X86_NUMAQ
7657+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7658 ---help---
7659 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7660 However, the address space of 32-bit x86 processors is only 4
7661@@ -1079,7 +1079,7 @@ config NOHIGHMEM
7662
7663 config HIGHMEM4G
7664 bool "4GB"
7665- depends on !X86_NUMAQ
7666+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7667 ---help---
7668 Select this if you have a 32-bit processor and between 1 and 4
7669 gigabytes of physical RAM.
7670@@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7671 hex
7672 default 0xB0000000 if VMSPLIT_3G_OPT
7673 default 0x80000000 if VMSPLIT_2G
7674- default 0x78000000 if VMSPLIT_2G_OPT
7675+ default 0x70000000 if VMSPLIT_2G_OPT
7676 default 0x40000000 if VMSPLIT_1G
7677 default 0xC0000000
7678 depends on X86_32
7679@@ -1523,6 +1523,7 @@ config SECCOMP
7680
7681 config CC_STACKPROTECTOR
7682 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7683+ depends on X86_64 || !PAX_MEMORY_UDEREF
7684 ---help---
7685 This option turns on the -fstack-protector GCC feature. This
7686 feature puts, at the beginning of functions, a canary value on
7687@@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7688 config PHYSICAL_START
7689 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7690 default "0x1000000"
7691+ range 0x400000 0x40000000
7692 ---help---
7693 This gives the physical address where the kernel is loaded.
7694
7695@@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7696 config PHYSICAL_ALIGN
7697 hex "Alignment value to which kernel should be aligned" if X86_32
7698 default "0x1000000"
7699+ range 0x400000 0x1000000 if PAX_KERNEXEC
7700 range 0x2000 0x1000000
7701 ---help---
7702 This value puts the alignment restrictions on physical address
7703@@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7704 Say N if you want to disable CPU hotplug.
7705
7706 config COMPAT_VDSO
7707- def_bool y
7708+ def_bool n
7709 prompt "Compat VDSO support"
7710 depends on X86_32 || IA32_EMULATION
7711+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7712 ---help---
7713 Map the 32-bit VDSO to the predictable old-style address too.
7714
7715diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7716index 706e12e..62e4feb 100644
7717--- a/arch/x86/Kconfig.cpu
7718+++ b/arch/x86/Kconfig.cpu
7719@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7720
7721 config X86_F00F_BUG
7722 def_bool y
7723- depends on M586MMX || M586TSC || M586 || M486 || M386
7724+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7725
7726 config X86_INVD_BUG
7727 def_bool y
7728@@ -358,7 +358,7 @@ config X86_POPAD_OK
7729
7730 config X86_ALIGNMENT_16
7731 def_bool y
7732- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7733+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7734
7735 config X86_INTEL_USERCOPY
7736 def_bool y
7737@@ -404,7 +404,7 @@ config X86_CMPXCHG64
7738 # generates cmov.
7739 config X86_CMOV
7740 def_bool y
7741- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7742+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7743
7744 config X86_MINIMUM_CPU_FAMILY
7745 int
7746diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7747index e46c214..ab62fd1 100644
7748--- a/arch/x86/Kconfig.debug
7749+++ b/arch/x86/Kconfig.debug
7750@@ -84,7 +84,7 @@ config X86_PTDUMP
7751 config DEBUG_RODATA
7752 bool "Write protect kernel read-only data structures"
7753 default y
7754- depends on DEBUG_KERNEL
7755+ depends on DEBUG_KERNEL && BROKEN
7756 ---help---
7757 Mark the kernel read-only data as write-protected in the pagetables,
7758 in order to catch accidental (and incorrect) writes to such const
7759@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7760
7761 config DEBUG_SET_MODULE_RONX
7762 bool "Set loadable kernel module data as NX and text as RO"
7763- depends on MODULES
7764+ depends on MODULES && BROKEN
7765 ---help---
7766 This option helps catch unintended modifications to loadable
7767 kernel module's text and read-only data. It also prevents execution
7768@@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7769
7770 config DEBUG_STRICT_USER_COPY_CHECKS
7771 bool "Strict copy size checks"
7772- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7773+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7774 ---help---
7775 Enabling this option turns a certain set of sanity checks for user
7776 copy operations into compile time failures.
7777diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7778index b1c611e..2c1a823 100644
7779--- a/arch/x86/Makefile
7780+++ b/arch/x86/Makefile
7781@@ -46,6 +46,7 @@ else
7782 UTS_MACHINE := x86_64
7783 CHECKFLAGS += -D__x86_64__ -m64
7784
7785+ biarch := $(call cc-option,-m64)
7786 KBUILD_AFLAGS += -m64
7787 KBUILD_CFLAGS += -m64
7788
7789@@ -222,3 +223,12 @@ define archhelp
7790 echo ' FDARGS="..." arguments for the booted kernel'
7791 echo ' FDINITRD=file initrd for the booted kernel'
7792 endef
7793+
7794+define OLD_LD
7795+
7796+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7797+*** Please upgrade your binutils to 2.18 or newer
7798+endef
7799+
7800+archprepare:
7801+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7802diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7803index 5a747dd..ff7b12c 100644
7804--- a/arch/x86/boot/Makefile
7805+++ b/arch/x86/boot/Makefile
7806@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7807 $(call cc-option, -fno-stack-protector) \
7808 $(call cc-option, -mpreferred-stack-boundary=2)
7809 KBUILD_CFLAGS += $(call cc-option, -m32)
7810+ifdef CONSTIFY_PLUGIN
7811+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7812+endif
7813 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7814 GCOV_PROFILE := n
7815
7816diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7817index 878e4b9..20537ab 100644
7818--- a/arch/x86/boot/bitops.h
7819+++ b/arch/x86/boot/bitops.h
7820@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7821 u8 v;
7822 const u32 *p = (const u32 *)addr;
7823
7824- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7825+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7826 return v;
7827 }
7828
7829@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7830
7831 static inline void set_bit(int nr, void *addr)
7832 {
7833- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7834+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7835 }
7836
7837 #endif /* BOOT_BITOPS_H */
7838diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7839index 18997e5..83d9c67 100644
7840--- a/arch/x86/boot/boot.h
7841+++ b/arch/x86/boot/boot.h
7842@@ -85,7 +85,7 @@ static inline void io_delay(void)
7843 static inline u16 ds(void)
7844 {
7845 u16 seg;
7846- asm("movw %%ds,%0" : "=rm" (seg));
7847+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7848 return seg;
7849 }
7850
7851@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7852 static inline int memcmp(const void *s1, const void *s2, size_t len)
7853 {
7854 u8 diff;
7855- asm("repe; cmpsb; setnz %0"
7856+ asm volatile("repe; cmpsb; setnz %0"
7857 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7858 return diff;
7859 }
7860diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7861index e398bb5..3a382ca 100644
7862--- a/arch/x86/boot/compressed/Makefile
7863+++ b/arch/x86/boot/compressed/Makefile
7864@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7865 KBUILD_CFLAGS += $(cflags-y)
7866 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7867 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7868+ifdef CONSTIFY_PLUGIN
7869+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7870+endif
7871
7872 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7873 GCOV_PROFILE := n
7874diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7875index 0cdfc0d..6e79437 100644
7876--- a/arch/x86/boot/compressed/eboot.c
7877+++ b/arch/x86/boot/compressed/eboot.c
7878@@ -122,7 +122,6 @@ again:
7879 *addr = max_addr;
7880 }
7881
7882-free_pool:
7883 efi_call_phys1(sys_table->boottime->free_pool, map);
7884
7885 fail:
7886@@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7887 if (i == map_size / desc_size)
7888 status = EFI_NOT_FOUND;
7889
7890-free_pool:
7891 efi_call_phys1(sys_table->boottime->free_pool, map);
7892 fail:
7893 return status;
7894diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7895index c85e3ac..6f5aa80 100644
7896--- a/arch/x86/boot/compressed/head_32.S
7897+++ b/arch/x86/boot/compressed/head_32.S
7898@@ -106,7 +106,7 @@ preferred_addr:
7899 notl %eax
7900 andl %eax, %ebx
7901 #else
7902- movl $LOAD_PHYSICAL_ADDR, %ebx
7903+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7904 #endif
7905
7906 /* Target address to relocate to for decompression */
7907@@ -192,7 +192,7 @@ relocated:
7908 * and where it was actually loaded.
7909 */
7910 movl %ebp, %ebx
7911- subl $LOAD_PHYSICAL_ADDR, %ebx
7912+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7913 jz 2f /* Nothing to be done if loaded at compiled addr. */
7914 /*
7915 * Process relocations.
7916@@ -200,8 +200,7 @@ relocated:
7917
7918 1: subl $4, %edi
7919 movl (%edi), %ecx
7920- testl %ecx, %ecx
7921- jz 2f
7922+ jecxz 2f
7923 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7924 jmp 1b
7925 2:
7926diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7927index 87e03a1..0d94c76 100644
7928--- a/arch/x86/boot/compressed/head_64.S
7929+++ b/arch/x86/boot/compressed/head_64.S
7930@@ -91,7 +91,7 @@ ENTRY(startup_32)
7931 notl %eax
7932 andl %eax, %ebx
7933 #else
7934- movl $LOAD_PHYSICAL_ADDR, %ebx
7935+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7936 #endif
7937
7938 /* Target address to relocate to for decompression */
7939@@ -263,7 +263,7 @@ preferred_addr:
7940 notq %rax
7941 andq %rax, %rbp
7942 #else
7943- movq $LOAD_PHYSICAL_ADDR, %rbp
7944+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7945 #endif
7946
7947 /* Target address to relocate to for decompression */
7948diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7949index 7116dcb..d9ae1d7 100644
7950--- a/arch/x86/boot/compressed/misc.c
7951+++ b/arch/x86/boot/compressed/misc.c
7952@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7953 case PT_LOAD:
7954 #ifdef CONFIG_RELOCATABLE
7955 dest = output;
7956- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7957+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7958 #else
7959 dest = (void *)(phdr->p_paddr);
7960 #endif
7961@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7962 error("Destination address too large");
7963 #endif
7964 #ifndef CONFIG_RELOCATABLE
7965- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7966+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7967 error("Wrong destination address");
7968 #endif
7969
7970diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7971index 4d3ff03..e4972ff 100644
7972--- a/arch/x86/boot/cpucheck.c
7973+++ b/arch/x86/boot/cpucheck.c
7974@@ -74,7 +74,7 @@ static int has_fpu(void)
7975 u16 fcw = -1, fsw = -1;
7976 u32 cr0;
7977
7978- asm("movl %%cr0,%0" : "=r" (cr0));
7979+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7980 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7981 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7982 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7983@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7984 {
7985 u32 f0, f1;
7986
7987- asm("pushfl ; "
7988+ asm volatile("pushfl ; "
7989 "pushfl ; "
7990 "popl %0 ; "
7991 "movl %0,%1 ; "
7992@@ -115,7 +115,7 @@ static void get_flags(void)
7993 set_bit(X86_FEATURE_FPU, cpu.flags);
7994
7995 if (has_eflag(X86_EFLAGS_ID)) {
7996- asm("cpuid"
7997+ asm volatile("cpuid"
7998 : "=a" (max_intel_level),
7999 "=b" (cpu_vendor[0]),
8000 "=d" (cpu_vendor[1]),
8001@@ -124,7 +124,7 @@ static void get_flags(void)
8002
8003 if (max_intel_level >= 0x00000001 &&
8004 max_intel_level <= 0x0000ffff) {
8005- asm("cpuid"
8006+ asm volatile("cpuid"
8007 : "=a" (tfms),
8008 "=c" (cpu.flags[4]),
8009 "=d" (cpu.flags[0])
8010@@ -136,7 +136,7 @@ static void get_flags(void)
8011 cpu.model += ((tfms >> 16) & 0xf) << 4;
8012 }
8013
8014- asm("cpuid"
8015+ asm volatile("cpuid"
8016 : "=a" (max_amd_level)
8017 : "a" (0x80000000)
8018 : "ebx", "ecx", "edx");
8019@@ -144,7 +144,7 @@ static void get_flags(void)
8020 if (max_amd_level >= 0x80000001 &&
8021 max_amd_level <= 0x8000ffff) {
8022 u32 eax = 0x80000001;
8023- asm("cpuid"
8024+ asm volatile("cpuid"
8025 : "+a" (eax),
8026 "=c" (cpu.flags[6]),
8027 "=d" (cpu.flags[1])
8028@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8029 u32 ecx = MSR_K7_HWCR;
8030 u32 eax, edx;
8031
8032- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8033+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8034 eax &= ~(1 << 15);
8035- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8036+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8037
8038 get_flags(); /* Make sure it really did something */
8039 err = check_flags();
8040@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8041 u32 ecx = MSR_VIA_FCR;
8042 u32 eax, edx;
8043
8044- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8045+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8046 eax |= (1<<1)|(1<<7);
8047- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8048+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8049
8050 set_bit(X86_FEATURE_CX8, cpu.flags);
8051 err = check_flags();
8052@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8053 u32 eax, edx;
8054 u32 level = 1;
8055
8056- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8057- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8058- asm("cpuid"
8059+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8060+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8061+ asm volatile("cpuid"
8062 : "+a" (level), "=d" (cpu.flags[0])
8063 : : "ecx", "ebx");
8064- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8065+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8066
8067 err = check_flags();
8068 }
8069diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8070index f1bbeeb..aff09cb 100644
8071--- a/arch/x86/boot/header.S
8072+++ b/arch/x86/boot/header.S
8073@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8074 # single linked list of
8075 # struct setup_data
8076
8077-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8078+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8079
8080 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8081 #define VO_INIT_SIZE (VO__end - VO__text)
8082diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8083index db75d07..8e6d0af 100644
8084--- a/arch/x86/boot/memory.c
8085+++ b/arch/x86/boot/memory.c
8086@@ -19,7 +19,7 @@
8087
8088 static int detect_memory_e820(void)
8089 {
8090- int count = 0;
8091+ unsigned int count = 0;
8092 struct biosregs ireg, oreg;
8093 struct e820entry *desc = boot_params.e820_map;
8094 static struct e820entry buf; /* static so it is zeroed */
8095diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8096index 11e8c6e..fdbb1ed 100644
8097--- a/arch/x86/boot/video-vesa.c
8098+++ b/arch/x86/boot/video-vesa.c
8099@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8100
8101 boot_params.screen_info.vesapm_seg = oreg.es;
8102 boot_params.screen_info.vesapm_off = oreg.di;
8103+ boot_params.screen_info.vesapm_size = oreg.cx;
8104 }
8105
8106 /*
8107diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8108index 43eda28..5ab5fdb 100644
8109--- a/arch/x86/boot/video.c
8110+++ b/arch/x86/boot/video.c
8111@@ -96,7 +96,7 @@ static void store_mode_params(void)
8112 static unsigned int get_entry(void)
8113 {
8114 char entry_buf[4];
8115- int i, len = 0;
8116+ unsigned int i, len = 0;
8117 int key;
8118 unsigned int v;
8119
8120diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8121index 5b577d5..3c1fed4 100644
8122--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8123+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8124@@ -8,6 +8,8 @@
8125 * including this sentence is retained in full.
8126 */
8127
8128+#include <asm/alternative-asm.h>
8129+
8130 .extern crypto_ft_tab
8131 .extern crypto_it_tab
8132 .extern crypto_fl_tab
8133@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8134 je B192; \
8135 leaq 32(r9),r9;
8136
8137+#define ret pax_force_retaddr 0, 1; ret
8138+
8139 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8140 movq r1,r2; \
8141 movq r3,r4; \
8142diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8143index 3470624..201259d 100644
8144--- a/arch/x86/crypto/aesni-intel_asm.S
8145+++ b/arch/x86/crypto/aesni-intel_asm.S
8146@@ -31,6 +31,7 @@
8147
8148 #include <linux/linkage.h>
8149 #include <asm/inst.h>
8150+#include <asm/alternative-asm.h>
8151
8152 #ifdef __x86_64__
8153 .data
8154@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8155 pop %r14
8156 pop %r13
8157 pop %r12
8158+ pax_force_retaddr 0, 1
8159 ret
8160+ENDPROC(aesni_gcm_dec)
8161
8162
8163 /*****************************************************************************
8164@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8165 pop %r14
8166 pop %r13
8167 pop %r12
8168+ pax_force_retaddr 0, 1
8169 ret
8170+ENDPROC(aesni_gcm_enc)
8171
8172 #endif
8173
8174@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8175 pxor %xmm1, %xmm0
8176 movaps %xmm0, (TKEYP)
8177 add $0x10, TKEYP
8178+ pax_force_retaddr_bts
8179 ret
8180
8181 .align 4
8182@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8183 shufps $0b01001110, %xmm2, %xmm1
8184 movaps %xmm1, 0x10(TKEYP)
8185 add $0x20, TKEYP
8186+ pax_force_retaddr_bts
8187 ret
8188
8189 .align 4
8190@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8191
8192 movaps %xmm0, (TKEYP)
8193 add $0x10, TKEYP
8194+ pax_force_retaddr_bts
8195 ret
8196
8197 .align 4
8198@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8199 pxor %xmm1, %xmm2
8200 movaps %xmm2, (TKEYP)
8201 add $0x10, TKEYP
8202+ pax_force_retaddr_bts
8203 ret
8204
8205 /*
8206@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8207 #ifndef __x86_64__
8208 popl KEYP
8209 #endif
8210+ pax_force_retaddr 0, 1
8211 ret
8212+ENDPROC(aesni_set_key)
8213
8214 /*
8215 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8216@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8217 popl KLEN
8218 popl KEYP
8219 #endif
8220+ pax_force_retaddr 0, 1
8221 ret
8222+ENDPROC(aesni_enc)
8223
8224 /*
8225 * _aesni_enc1: internal ABI
8226@@ -1959,6 +1972,7 @@ _aesni_enc1:
8227 AESENC KEY STATE
8228 movaps 0x70(TKEYP), KEY
8229 AESENCLAST KEY STATE
8230+ pax_force_retaddr_bts
8231 ret
8232
8233 /*
8234@@ -2067,6 +2081,7 @@ _aesni_enc4:
8235 AESENCLAST KEY STATE2
8236 AESENCLAST KEY STATE3
8237 AESENCLAST KEY STATE4
8238+ pax_force_retaddr_bts
8239 ret
8240
8241 /*
8242@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8243 popl KLEN
8244 popl KEYP
8245 #endif
8246+ pax_force_retaddr 0, 1
8247 ret
8248+ENDPROC(aesni_dec)
8249
8250 /*
8251 * _aesni_dec1: internal ABI
8252@@ -2146,6 +2163,7 @@ _aesni_dec1:
8253 AESDEC KEY STATE
8254 movaps 0x70(TKEYP), KEY
8255 AESDECLAST KEY STATE
8256+ pax_force_retaddr_bts
8257 ret
8258
8259 /*
8260@@ -2254,6 +2272,7 @@ _aesni_dec4:
8261 AESDECLAST KEY STATE2
8262 AESDECLAST KEY STATE3
8263 AESDECLAST KEY STATE4
8264+ pax_force_retaddr_bts
8265 ret
8266
8267 /*
8268@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8269 popl KEYP
8270 popl LEN
8271 #endif
8272+ pax_force_retaddr 0, 1
8273 ret
8274+ENDPROC(aesni_ecb_enc)
8275
8276 /*
8277 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8278@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8279 popl KEYP
8280 popl LEN
8281 #endif
8282+ pax_force_retaddr 0, 1
8283 ret
8284+ENDPROC(aesni_ecb_dec)
8285
8286 /*
8287 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8288@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8289 popl LEN
8290 popl IVP
8291 #endif
8292+ pax_force_retaddr 0, 1
8293 ret
8294+ENDPROC(aesni_cbc_enc)
8295
8296 /*
8297 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8298@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8299 popl LEN
8300 popl IVP
8301 #endif
8302+ pax_force_retaddr 0, 1
8303 ret
8304+ENDPROC(aesni_cbc_dec)
8305
8306 #ifdef __x86_64__
8307 .align 16
8308@@ -2526,6 +2553,7 @@ _aesni_inc_init:
8309 mov $1, TCTR_LOW
8310 MOVQ_R64_XMM TCTR_LOW INC
8311 MOVQ_R64_XMM CTR TCTR_LOW
8312+ pax_force_retaddr_bts
8313 ret
8314
8315 /*
8316@@ -2554,6 +2582,7 @@ _aesni_inc:
8317 .Linc_low:
8318 movaps CTR, IV
8319 PSHUFB_XMM BSWAP_MASK IV
8320+ pax_force_retaddr_bts
8321 ret
8322
8323 /*
8324@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8325 .Lctr_enc_ret:
8326 movups IV, (IVP)
8327 .Lctr_enc_just_ret:
8328+ pax_force_retaddr 0, 1
8329 ret
8330+ENDPROC(aesni_ctr_enc)
8331 #endif
8332diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8333index 391d245..67f35c2 100644
8334--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8335+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8336@@ -20,6 +20,8 @@
8337 *
8338 */
8339
8340+#include <asm/alternative-asm.h>
8341+
8342 .file "blowfish-x86_64-asm.S"
8343 .text
8344
8345@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8346 jnz __enc_xor;
8347
8348 write_block();
8349+ pax_force_retaddr 0, 1
8350 ret;
8351 __enc_xor:
8352 xor_block();
8353+ pax_force_retaddr 0, 1
8354 ret;
8355
8356 .align 8
8357@@ -188,6 +192,7 @@ blowfish_dec_blk:
8358
8359 movq %r11, %rbp;
8360
8361+ pax_force_retaddr 0, 1
8362 ret;
8363
8364 /**********************************************************************
8365@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8366
8367 popq %rbx;
8368 popq %rbp;
8369+ pax_force_retaddr 0, 1
8370 ret;
8371
8372 __enc_xor4:
8373@@ -349,6 +355,7 @@ __enc_xor4:
8374
8375 popq %rbx;
8376 popq %rbp;
8377+ pax_force_retaddr 0, 1
8378 ret;
8379
8380 .align 8
8381@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8382 popq %rbx;
8383 popq %rbp;
8384
8385+ pax_force_retaddr 0, 1
8386 ret;
8387
8388diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8389index 0b33743..7a56206 100644
8390--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8391+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8392@@ -20,6 +20,8 @@
8393 *
8394 */
8395
8396+#include <asm/alternative-asm.h>
8397+
8398 .file "camellia-x86_64-asm_64.S"
8399 .text
8400
8401@@ -229,12 +231,14 @@ __enc_done:
8402 enc_outunpack(mov, RT1);
8403
8404 movq RRBP, %rbp;
8405+ pax_force_retaddr 0, 1
8406 ret;
8407
8408 __enc_xor:
8409 enc_outunpack(xor, RT1);
8410
8411 movq RRBP, %rbp;
8412+ pax_force_retaddr 0, 1
8413 ret;
8414
8415 .global camellia_dec_blk;
8416@@ -275,6 +279,7 @@ __dec_rounds16:
8417 dec_outunpack();
8418
8419 movq RRBP, %rbp;
8420+ pax_force_retaddr 0, 1
8421 ret;
8422
8423 /**********************************************************************
8424@@ -468,6 +473,7 @@ __enc2_done:
8425
8426 movq RRBP, %rbp;
8427 popq %rbx;
8428+ pax_force_retaddr 0, 1
8429 ret;
8430
8431 __enc2_xor:
8432@@ -475,6 +481,7 @@ __enc2_xor:
8433
8434 movq RRBP, %rbp;
8435 popq %rbx;
8436+ pax_force_retaddr 0, 1
8437 ret;
8438
8439 .global camellia_dec_blk_2way;
8440@@ -517,4 +524,5 @@ __dec2_rounds16:
8441
8442 movq RRBP, %rbp;
8443 movq RXOR, %rbx;
8444+ pax_force_retaddr 0, 1
8445 ret;
8446diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8447index 6214a9b..1f4fc9a 100644
8448--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8449+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8450@@ -1,3 +1,5 @@
8451+#include <asm/alternative-asm.h>
8452+
8453 # enter ECRYPT_encrypt_bytes
8454 .text
8455 .p2align 5
8456@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8457 add %r11,%rsp
8458 mov %rdi,%rax
8459 mov %rsi,%rdx
8460+ pax_force_retaddr 0, 1
8461 ret
8462 # bytesatleast65:
8463 ._bytesatleast65:
8464@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8465 add %r11,%rsp
8466 mov %rdi,%rax
8467 mov %rsi,%rdx
8468+ pax_force_retaddr
8469 ret
8470 # enter ECRYPT_ivsetup
8471 .text
8472@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8473 add %r11,%rsp
8474 mov %rdi,%rax
8475 mov %rsi,%rdx
8476+ pax_force_retaddr
8477 ret
8478diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8479index 3ee1ff0..cbc568b 100644
8480--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8481+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8482@@ -24,6 +24,8 @@
8483 *
8484 */
8485
8486+#include <asm/alternative-asm.h>
8487+
8488 .file "serpent-sse2-x86_64-asm_64.S"
8489 .text
8490
8491@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8492 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8493 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8494
8495+ pax_force_retaddr
8496 ret;
8497
8498 __enc_xor8:
8499 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8500 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8501
8502+ pax_force_retaddr
8503 ret;
8504
8505 .align 8
8506@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8507 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8508 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8509
8510+ pax_force_retaddr
8511 ret;
8512diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8513index b2c2f57..8470cab 100644
8514--- a/arch/x86/crypto/sha1_ssse3_asm.S
8515+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8516@@ -28,6 +28,8 @@
8517 * (at your option) any later version.
8518 */
8519
8520+#include <asm/alternative-asm.h>
8521+
8522 #define CTX %rdi // arg1
8523 #define BUF %rsi // arg2
8524 #define CNT %rdx // arg3
8525@@ -104,6 +106,7 @@
8526 pop %r12
8527 pop %rbp
8528 pop %rbx
8529+ pax_force_retaddr 0, 1
8530 ret
8531
8532 .size \name, .-\name
8533diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8534index 5b012a2..36d5364 100644
8535--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8536+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8537@@ -20,6 +20,8 @@
8538 *
8539 */
8540
8541+#include <asm/alternative-asm.h>
8542+
8543 .file "twofish-x86_64-asm-3way.S"
8544 .text
8545
8546@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8547 popq %r13;
8548 popq %r14;
8549 popq %r15;
8550+ pax_force_retaddr 0, 1
8551 ret;
8552
8553 __enc_xor3:
8554@@ -271,6 +274,7 @@ __enc_xor3:
8555 popq %r13;
8556 popq %r14;
8557 popq %r15;
8558+ pax_force_retaddr 0, 1
8559 ret;
8560
8561 .global twofish_dec_blk_3way
8562@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8563 popq %r13;
8564 popq %r14;
8565 popq %r15;
8566+ pax_force_retaddr 0, 1
8567 ret;
8568
8569diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8570index 7bcf3fc..f53832f 100644
8571--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8572+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8573@@ -21,6 +21,7 @@
8574 .text
8575
8576 #include <asm/asm-offsets.h>
8577+#include <asm/alternative-asm.h>
8578
8579 #define a_offset 0
8580 #define b_offset 4
8581@@ -268,6 +269,7 @@ twofish_enc_blk:
8582
8583 popq R1
8584 movq $1,%rax
8585+ pax_force_retaddr 0, 1
8586 ret
8587
8588 twofish_dec_blk:
8589@@ -319,4 +321,5 @@ twofish_dec_blk:
8590
8591 popq R1
8592 movq $1,%rax
8593+ pax_force_retaddr 0, 1
8594 ret
8595diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8596index 07b3a68..bd2a388 100644
8597--- a/arch/x86/ia32/ia32_aout.c
8598+++ b/arch/x86/ia32/ia32_aout.c
8599@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8600 unsigned long dump_start, dump_size;
8601 struct user32 dump;
8602
8603+ memset(&dump, 0, sizeof(dump));
8604+
8605 fs = get_fs();
8606 set_fs(KERNEL_DS);
8607 has_dumped = 1;
8608diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8609index 4f5bfac..e1ef0d3 100644
8610--- a/arch/x86/ia32/ia32_signal.c
8611+++ b/arch/x86/ia32/ia32_signal.c
8612@@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8613 }
8614 seg = get_fs();
8615 set_fs(KERNEL_DS);
8616- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8617+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8618 set_fs(seg);
8619 if (ret >= 0 && uoss_ptr) {
8620 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8621@@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8622 */
8623 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8624 size_t frame_size,
8625- void **fpstate)
8626+ void __user **fpstate)
8627 {
8628 unsigned long sp;
8629
8630@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8631
8632 if (used_math()) {
8633 sp = sp - sig_xstate_ia32_size;
8634- *fpstate = (struct _fpstate_ia32 *) sp;
8635+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8636 if (save_i387_xstate_ia32(*fpstate) < 0)
8637 return (void __user *) -1L;
8638 }
8639@@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8640 sp -= frame_size;
8641 /* Align the stack pointer according to the i386 ABI,
8642 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8643- sp = ((sp + 4) & -16ul) - 4;
8644+ sp = ((sp - 12) & -16ul) - 4;
8645 return (void __user *) sp;
8646 }
8647
8648@@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8649 * These are actually not used anymore, but left because some
8650 * gdb versions depend on them as a marker.
8651 */
8652- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8653+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8654 } put_user_catch(err);
8655
8656 if (err)
8657@@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8658 0xb8,
8659 __NR_ia32_rt_sigreturn,
8660 0x80cd,
8661- 0,
8662+ 0
8663 };
8664
8665 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8666@@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8667
8668 if (ka->sa.sa_flags & SA_RESTORER)
8669 restorer = ka->sa.sa_restorer;
8670+ else if (current->mm->context.vdso)
8671+ /* Return stub is in 32bit vsyscall page */
8672+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8673 else
8674- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8675- rt_sigreturn);
8676+ restorer = &frame->retcode;
8677 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8678
8679 /*
8680 * Not actually used anymore, but left because some gdb
8681 * versions need it.
8682 */
8683- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8684+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8685 } put_user_catch(err);
8686
8687 if (err)
8688diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8689index e3e7340..05ed805 100644
8690--- a/arch/x86/ia32/ia32entry.S
8691+++ b/arch/x86/ia32/ia32entry.S
8692@@ -13,8 +13,10 @@
8693 #include <asm/thread_info.h>
8694 #include <asm/segment.h>
8695 #include <asm/irqflags.h>
8696+#include <asm/pgtable.h>
8697 #include <linux/linkage.h>
8698 #include <linux/err.h>
8699+#include <asm/alternative-asm.h>
8700
8701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8702 #include <linux/elf-em.h>
8703@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8704 ENDPROC(native_irq_enable_sysexit)
8705 #endif
8706
8707+ .macro pax_enter_kernel_user
8708+ pax_set_fptr_mask
8709+#ifdef CONFIG_PAX_MEMORY_UDEREF
8710+ call pax_enter_kernel_user
8711+#endif
8712+ .endm
8713+
8714+ .macro pax_exit_kernel_user
8715+#ifdef CONFIG_PAX_MEMORY_UDEREF
8716+ call pax_exit_kernel_user
8717+#endif
8718+#ifdef CONFIG_PAX_RANDKSTACK
8719+ pushq %rax
8720+ pushq %r11
8721+ call pax_randomize_kstack
8722+ popq %r11
8723+ popq %rax
8724+#endif
8725+ .endm
8726+
8727+.macro pax_erase_kstack
8728+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8729+ call pax_erase_kstack
8730+#endif
8731+.endm
8732+
8733 /*
8734 * 32bit SYSENTER instruction entry.
8735 *
8736@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8737 CFI_REGISTER rsp,rbp
8738 SWAPGS_UNSAFE_STACK
8739 movq PER_CPU_VAR(kernel_stack), %rsp
8740- addq $(KERNEL_STACK_OFFSET),%rsp
8741- /*
8742- * No need to follow this irqs on/off section: the syscall
8743- * disabled irqs, here we enable it straight after entry:
8744- */
8745- ENABLE_INTERRUPTS(CLBR_NONE)
8746 movl %ebp,%ebp /* zero extension */
8747 pushq_cfi $__USER32_DS
8748 /*CFI_REL_OFFSET ss,0*/
8749@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8750 CFI_REL_OFFSET rsp,0
8751 pushfq_cfi
8752 /*CFI_REL_OFFSET rflags,0*/
8753- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8754- CFI_REGISTER rip,r10
8755+ orl $X86_EFLAGS_IF,(%rsp)
8756+ GET_THREAD_INFO(%r11)
8757+ movl TI_sysenter_return(%r11), %r11d
8758+ CFI_REGISTER rip,r11
8759 pushq_cfi $__USER32_CS
8760 /*CFI_REL_OFFSET cs,0*/
8761 movl %eax, %eax
8762- pushq_cfi %r10
8763+ pushq_cfi %r11
8764 CFI_REL_OFFSET rip,0
8765 pushq_cfi %rax
8766 cld
8767 SAVE_ARGS 0,1,0
8768+ pax_enter_kernel_user
8769+ /*
8770+ * No need to follow this irqs on/off section: the syscall
8771+ * disabled irqs, here we enable it straight after entry:
8772+ */
8773+ ENABLE_INTERRUPTS(CLBR_NONE)
8774 /* no need to do an access_ok check here because rbp has been
8775 32bit zero extended */
8776+
8777+#ifdef CONFIG_PAX_MEMORY_UDEREF
8778+ mov $PAX_USER_SHADOW_BASE,%r11
8779+ add %r11,%rbp
8780+#endif
8781+
8782 1: movl (%rbp),%ebp
8783 .section __ex_table,"a"
8784 .quad 1b,ia32_badarg
8785 .previous
8786- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8787- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8788+ GET_THREAD_INFO(%r11)
8789+ orl $TS_COMPAT,TI_status(%r11)
8790+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8791 CFI_REMEMBER_STATE
8792 jnz sysenter_tracesys
8793 cmpq $(IA32_NR_syscalls-1),%rax
8794@@ -160,12 +197,15 @@ sysenter_do_call:
8795 sysenter_dispatch:
8796 call *ia32_sys_call_table(,%rax,8)
8797 movq %rax,RAX-ARGOFFSET(%rsp)
8798+ GET_THREAD_INFO(%r11)
8799 DISABLE_INTERRUPTS(CLBR_NONE)
8800 TRACE_IRQS_OFF
8801- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8802+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8803 jnz sysexit_audit
8804 sysexit_from_sys_call:
8805- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8806+ pax_exit_kernel_user
8807+ pax_erase_kstack
8808+ andl $~TS_COMPAT,TI_status(%r11)
8809 /* clear IF, that popfq doesn't enable interrupts early */
8810 andl $~0x200,EFLAGS-R11(%rsp)
8811 movl RIP-R11(%rsp),%edx /* User %eip */
8812@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8813 movl %eax,%esi /* 2nd arg: syscall number */
8814 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8815 call __audit_syscall_entry
8816+
8817+ pax_erase_kstack
8818+
8819 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8820 cmpq $(IA32_NR_syscalls-1),%rax
8821 ja ia32_badsys
8822@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8823 .endm
8824
8825 .macro auditsys_exit exit
8826- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8827+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8828 jnz ia32_ret_from_sys_call
8829 TRACE_IRQS_ON
8830 sti
8831@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8832 1: setbe %al /* 1 if error, 0 if not */
8833 movzbl %al,%edi /* zero-extend that into %edi */
8834 call __audit_syscall_exit
8835+ GET_THREAD_INFO(%r11)
8836 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8837 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8838 cli
8839 TRACE_IRQS_OFF
8840- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8841+ testl %edi,TI_flags(%r11)
8842 jz \exit
8843 CLEAR_RREGS -ARGOFFSET
8844 jmp int_with_check
8845@@ -235,7 +279,7 @@ sysexit_audit:
8846
8847 sysenter_tracesys:
8848 #ifdef CONFIG_AUDITSYSCALL
8849- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8850+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8851 jz sysenter_auditsys
8852 #endif
8853 SAVE_REST
8854@@ -243,6 +287,9 @@ sysenter_tracesys:
8855 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8856 movq %rsp,%rdi /* &pt_regs -> arg1 */
8857 call syscall_trace_enter
8858+
8859+ pax_erase_kstack
8860+
8861 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8862 RESTORE_REST
8863 cmpq $(IA32_NR_syscalls-1),%rax
8864@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8865 ENTRY(ia32_cstar_target)
8866 CFI_STARTPROC32 simple
8867 CFI_SIGNAL_FRAME
8868- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8869+ CFI_DEF_CFA rsp,0
8870 CFI_REGISTER rip,rcx
8871 /*CFI_REGISTER rflags,r11*/
8872 SWAPGS_UNSAFE_STACK
8873 movl %esp,%r8d
8874 CFI_REGISTER rsp,r8
8875 movq PER_CPU_VAR(kernel_stack),%rsp
8876+ SAVE_ARGS 8*6,0,0
8877+ pax_enter_kernel_user
8878 /*
8879 * No need to follow this irqs on/off section: the syscall
8880 * disabled irqs and here we enable it straight after entry:
8881 */
8882 ENABLE_INTERRUPTS(CLBR_NONE)
8883- SAVE_ARGS 8,0,0
8884 movl %eax,%eax /* zero extension */
8885 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8886 movq %rcx,RIP-ARGOFFSET(%rsp)
8887@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8888 /* no need to do an access_ok check here because r8 has been
8889 32bit zero extended */
8890 /* hardware stack frame is complete now */
8891+
8892+#ifdef CONFIG_PAX_MEMORY_UDEREF
8893+ mov $PAX_USER_SHADOW_BASE,%r11
8894+ add %r11,%r8
8895+#endif
8896+
8897 1: movl (%r8),%r9d
8898 .section __ex_table,"a"
8899 .quad 1b,ia32_badarg
8900 .previous
8901- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8902- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8903+ GET_THREAD_INFO(%r11)
8904+ orl $TS_COMPAT,TI_status(%r11)
8905+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8906 CFI_REMEMBER_STATE
8907 jnz cstar_tracesys
8908 cmpq $IA32_NR_syscalls-1,%rax
8909@@ -317,12 +372,15 @@ cstar_do_call:
8910 cstar_dispatch:
8911 call *ia32_sys_call_table(,%rax,8)
8912 movq %rax,RAX-ARGOFFSET(%rsp)
8913+ GET_THREAD_INFO(%r11)
8914 DISABLE_INTERRUPTS(CLBR_NONE)
8915 TRACE_IRQS_OFF
8916- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8917+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8918 jnz sysretl_audit
8919 sysretl_from_sys_call:
8920- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8921+ pax_exit_kernel_user
8922+ pax_erase_kstack
8923+ andl $~TS_COMPAT,TI_status(%r11)
8924 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8925 movl RIP-ARGOFFSET(%rsp),%ecx
8926 CFI_REGISTER rip,rcx
8927@@ -350,7 +408,7 @@ sysretl_audit:
8928
8929 cstar_tracesys:
8930 #ifdef CONFIG_AUDITSYSCALL
8931- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8932+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8933 jz cstar_auditsys
8934 #endif
8935 xchgl %r9d,%ebp
8936@@ -359,6 +417,9 @@ cstar_tracesys:
8937 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8938 movq %rsp,%rdi /* &pt_regs -> arg1 */
8939 call syscall_trace_enter
8940+
8941+ pax_erase_kstack
8942+
8943 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8944 RESTORE_REST
8945 xchgl %ebp,%r9d
8946@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8947 CFI_REL_OFFSET rip,RIP-RIP
8948 PARAVIRT_ADJUST_EXCEPTION_FRAME
8949 SWAPGS
8950- /*
8951- * No need to follow this irqs on/off section: the syscall
8952- * disabled irqs and here we enable it straight after entry:
8953- */
8954- ENABLE_INTERRUPTS(CLBR_NONE)
8955 movl %eax,%eax
8956 pushq_cfi %rax
8957 cld
8958 /* note the registers are not zero extended to the sf.
8959 this could be a problem. */
8960 SAVE_ARGS 0,1,0
8961- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8962- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8963+ pax_enter_kernel_user
8964+ /*
8965+ * No need to follow this irqs on/off section: the syscall
8966+ * disabled irqs and here we enable it straight after entry:
8967+ */
8968+ ENABLE_INTERRUPTS(CLBR_NONE)
8969+ GET_THREAD_INFO(%r11)
8970+ orl $TS_COMPAT,TI_status(%r11)
8971+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8972 jnz ia32_tracesys
8973 cmpq $(IA32_NR_syscalls-1),%rax
8974 ja ia32_badsys
8975@@ -435,6 +498,9 @@ ia32_tracesys:
8976 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8977 movq %rsp,%rdi /* &pt_regs -> arg1 */
8978 call syscall_trace_enter
8979+
8980+ pax_erase_kstack
8981+
8982 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8983 RESTORE_REST
8984 cmpq $(IA32_NR_syscalls-1),%rax
8985diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8986index aec2202..f76174e 100644
8987--- a/arch/x86/ia32/sys_ia32.c
8988+++ b/arch/x86/ia32/sys_ia32.c
8989@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8990 */
8991 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8992 {
8993- typeof(ubuf->st_uid) uid = 0;
8994- typeof(ubuf->st_gid) gid = 0;
8995+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8996+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8997 SET_UID(uid, stat->uid);
8998 SET_GID(gid, stat->gid);
8999 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9000@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9001 return alarm_setitimer(seconds);
9002 }
9003
9004-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9005+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9006 int options)
9007 {
9008 return compat_sys_wait4(pid, stat_addr, options, NULL);
9009@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9010 mm_segment_t old_fs = get_fs();
9011
9012 set_fs(KERNEL_DS);
9013- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9014+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9015 set_fs(old_fs);
9016 if (put_compat_timespec(&t, interval))
9017 return -EFAULT;
9018@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9019 mm_segment_t old_fs = get_fs();
9020
9021 set_fs(KERNEL_DS);
9022- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9023+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9024 set_fs(old_fs);
9025 if (!ret) {
9026 switch (_NSIG_WORDS) {
9027@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9028 if (copy_siginfo_from_user32(&info, uinfo))
9029 return -EFAULT;
9030 set_fs(KERNEL_DS);
9031- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9032+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9033 set_fs(old_fs);
9034 return ret;
9035 }
9036@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9037 return -EFAULT;
9038
9039 set_fs(KERNEL_DS);
9040- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9041+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9042 count);
9043 set_fs(old_fs);
9044
9045diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9046index 952bd01..7692c6f 100644
9047--- a/arch/x86/include/asm/alternative-asm.h
9048+++ b/arch/x86/include/asm/alternative-asm.h
9049@@ -15,6 +15,45 @@
9050 .endm
9051 #endif
9052
9053+#ifdef KERNEXEC_PLUGIN
9054+ .macro pax_force_retaddr_bts rip=0
9055+ btsq $63,\rip(%rsp)
9056+ .endm
9057+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9058+ .macro pax_force_retaddr rip=0, reload=0
9059+ btsq $63,\rip(%rsp)
9060+ .endm
9061+ .macro pax_force_fptr ptr
9062+ btsq $63,\ptr
9063+ .endm
9064+ .macro pax_set_fptr_mask
9065+ .endm
9066+#endif
9067+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9068+ .macro pax_force_retaddr rip=0, reload=0
9069+ .if \reload
9070+ pax_set_fptr_mask
9071+ .endif
9072+ orq %r10,\rip(%rsp)
9073+ .endm
9074+ .macro pax_force_fptr ptr
9075+ orq %r10,\ptr
9076+ .endm
9077+ .macro pax_set_fptr_mask
9078+ movabs $0x8000000000000000,%r10
9079+ .endm
9080+#endif
9081+#else
9082+ .macro pax_force_retaddr rip=0, reload=0
9083+ .endm
9084+ .macro pax_force_fptr ptr
9085+ .endm
9086+ .macro pax_force_retaddr_bts rip=0
9087+ .endm
9088+ .macro pax_set_fptr_mask
9089+ .endm
9090+#endif
9091+
9092 .macro altinstruction_entry orig alt feature orig_len alt_len
9093 .long \orig - .
9094 .long \alt - .
9095diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9096index 49331be..9706065 100644
9097--- a/arch/x86/include/asm/alternative.h
9098+++ b/arch/x86/include/asm/alternative.h
9099@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9100 ".section .discard,\"aw\",@progbits\n" \
9101 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9102 ".previous\n" \
9103- ".section .altinstr_replacement, \"ax\"\n" \
9104+ ".section .altinstr_replacement, \"a\"\n" \
9105 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9106 ".previous"
9107
9108diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9109index d854101..f6ea947 100644
9110--- a/arch/x86/include/asm/apic.h
9111+++ b/arch/x86/include/asm/apic.h
9112@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9113
9114 #ifdef CONFIG_X86_LOCAL_APIC
9115
9116-extern unsigned int apic_verbosity;
9117+extern int apic_verbosity;
9118 extern int local_apic_timer_c2_ok;
9119
9120 extern int disable_apic;
9121diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9122index 20370c6..a2eb9b0 100644
9123--- a/arch/x86/include/asm/apm.h
9124+++ b/arch/x86/include/asm/apm.h
9125@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9126 __asm__ __volatile__(APM_DO_ZERO_SEGS
9127 "pushl %%edi\n\t"
9128 "pushl %%ebp\n\t"
9129- "lcall *%%cs:apm_bios_entry\n\t"
9130+ "lcall *%%ss:apm_bios_entry\n\t"
9131 "setc %%al\n\t"
9132 "popl %%ebp\n\t"
9133 "popl %%edi\n\t"
9134@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9135 __asm__ __volatile__(APM_DO_ZERO_SEGS
9136 "pushl %%edi\n\t"
9137 "pushl %%ebp\n\t"
9138- "lcall *%%cs:apm_bios_entry\n\t"
9139+ "lcall *%%ss:apm_bios_entry\n\t"
9140 "setc %%bl\n\t"
9141 "popl %%ebp\n\t"
9142 "popl %%edi\n\t"
9143diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9144index 58cb6d4..a4b806c 100644
9145--- a/arch/x86/include/asm/atomic.h
9146+++ b/arch/x86/include/asm/atomic.h
9147@@ -22,7 +22,18 @@
9148 */
9149 static inline int atomic_read(const atomic_t *v)
9150 {
9151- return (*(volatile int *)&(v)->counter);
9152+ return (*(volatile const int *)&(v)->counter);
9153+}
9154+
9155+/**
9156+ * atomic_read_unchecked - read atomic variable
9157+ * @v: pointer of type atomic_unchecked_t
9158+ *
9159+ * Atomically reads the value of @v.
9160+ */
9161+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9162+{
9163+ return (*(volatile const int *)&(v)->counter);
9164 }
9165
9166 /**
9167@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9168 }
9169
9170 /**
9171+ * atomic_set_unchecked - set atomic variable
9172+ * @v: pointer of type atomic_unchecked_t
9173+ * @i: required value
9174+ *
9175+ * Atomically sets the value of @v to @i.
9176+ */
9177+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9178+{
9179+ v->counter = i;
9180+}
9181+
9182+/**
9183 * atomic_add - add integer to atomic variable
9184 * @i: integer value to add
9185 * @v: pointer of type atomic_t
9186@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9187 */
9188 static inline void atomic_add(int i, atomic_t *v)
9189 {
9190- asm volatile(LOCK_PREFIX "addl %1,%0"
9191+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9192+
9193+#ifdef CONFIG_PAX_REFCOUNT
9194+ "jno 0f\n"
9195+ LOCK_PREFIX "subl %1,%0\n"
9196+ "int $4\n0:\n"
9197+ _ASM_EXTABLE(0b, 0b)
9198+#endif
9199+
9200+ : "+m" (v->counter)
9201+ : "ir" (i));
9202+}
9203+
9204+/**
9205+ * atomic_add_unchecked - add integer to atomic variable
9206+ * @i: integer value to add
9207+ * @v: pointer of type atomic_unchecked_t
9208+ *
9209+ * Atomically adds @i to @v.
9210+ */
9211+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9212+{
9213+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9214 : "+m" (v->counter)
9215 : "ir" (i));
9216 }
9217@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9218 */
9219 static inline void atomic_sub(int i, atomic_t *v)
9220 {
9221- asm volatile(LOCK_PREFIX "subl %1,%0"
9222+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9223+
9224+#ifdef CONFIG_PAX_REFCOUNT
9225+ "jno 0f\n"
9226+ LOCK_PREFIX "addl %1,%0\n"
9227+ "int $4\n0:\n"
9228+ _ASM_EXTABLE(0b, 0b)
9229+#endif
9230+
9231+ : "+m" (v->counter)
9232+ : "ir" (i));
9233+}
9234+
9235+/**
9236+ * atomic_sub_unchecked - subtract integer from atomic variable
9237+ * @i: integer value to subtract
9238+ * @v: pointer of type atomic_unchecked_t
9239+ *
9240+ * Atomically subtracts @i from @v.
9241+ */
9242+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9243+{
9244+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9245 : "+m" (v->counter)
9246 : "ir" (i));
9247 }
9248@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9249 {
9250 unsigned char c;
9251
9252- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9253+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9254+
9255+#ifdef CONFIG_PAX_REFCOUNT
9256+ "jno 0f\n"
9257+ LOCK_PREFIX "addl %2,%0\n"
9258+ "int $4\n0:\n"
9259+ _ASM_EXTABLE(0b, 0b)
9260+#endif
9261+
9262+ "sete %1\n"
9263 : "+m" (v->counter), "=qm" (c)
9264 : "ir" (i) : "memory");
9265 return c;
9266@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9267 */
9268 static inline void atomic_inc(atomic_t *v)
9269 {
9270- asm volatile(LOCK_PREFIX "incl %0"
9271+ asm volatile(LOCK_PREFIX "incl %0\n"
9272+
9273+#ifdef CONFIG_PAX_REFCOUNT
9274+ "jno 0f\n"
9275+ LOCK_PREFIX "decl %0\n"
9276+ "int $4\n0:\n"
9277+ _ASM_EXTABLE(0b, 0b)
9278+#endif
9279+
9280+ : "+m" (v->counter));
9281+}
9282+
9283+/**
9284+ * atomic_inc_unchecked - increment atomic variable
9285+ * @v: pointer of type atomic_unchecked_t
9286+ *
9287+ * Atomically increments @v by 1.
9288+ */
9289+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9290+{
9291+ asm volatile(LOCK_PREFIX "incl %0\n"
9292 : "+m" (v->counter));
9293 }
9294
9295@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9296 */
9297 static inline void atomic_dec(atomic_t *v)
9298 {
9299- asm volatile(LOCK_PREFIX "decl %0"
9300+ asm volatile(LOCK_PREFIX "decl %0\n"
9301+
9302+#ifdef CONFIG_PAX_REFCOUNT
9303+ "jno 0f\n"
9304+ LOCK_PREFIX "incl %0\n"
9305+ "int $4\n0:\n"
9306+ _ASM_EXTABLE(0b, 0b)
9307+#endif
9308+
9309+ : "+m" (v->counter));
9310+}
9311+
9312+/**
9313+ * atomic_dec_unchecked - decrement atomic variable
9314+ * @v: pointer of type atomic_unchecked_t
9315+ *
9316+ * Atomically decrements @v by 1.
9317+ */
9318+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9319+{
9320+ asm volatile(LOCK_PREFIX "decl %0\n"
9321 : "+m" (v->counter));
9322 }
9323
9324@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9325 {
9326 unsigned char c;
9327
9328- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9329+ asm volatile(LOCK_PREFIX "decl %0\n"
9330+
9331+#ifdef CONFIG_PAX_REFCOUNT
9332+ "jno 0f\n"
9333+ LOCK_PREFIX "incl %0\n"
9334+ "int $4\n0:\n"
9335+ _ASM_EXTABLE(0b, 0b)
9336+#endif
9337+
9338+ "sete %1\n"
9339 : "+m" (v->counter), "=qm" (c)
9340 : : "memory");
9341 return c != 0;
9342@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9343 {
9344 unsigned char c;
9345
9346- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9347+ asm volatile(LOCK_PREFIX "incl %0\n"
9348+
9349+#ifdef CONFIG_PAX_REFCOUNT
9350+ "jno 0f\n"
9351+ LOCK_PREFIX "decl %0\n"
9352+ "int $4\n0:\n"
9353+ _ASM_EXTABLE(0b, 0b)
9354+#endif
9355+
9356+ "sete %1\n"
9357+ : "+m" (v->counter), "=qm" (c)
9358+ : : "memory");
9359+ return c != 0;
9360+}
9361+
9362+/**
9363+ * atomic_inc_and_test_unchecked - increment and test
9364+ * @v: pointer of type atomic_unchecked_t
9365+ *
9366+ * Atomically increments @v by 1
9367+ * and returns true if the result is zero, or false for all
9368+ * other cases.
9369+ */
9370+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9371+{
9372+ unsigned char c;
9373+
9374+ asm volatile(LOCK_PREFIX "incl %0\n"
9375+ "sete %1\n"
9376 : "+m" (v->counter), "=qm" (c)
9377 : : "memory");
9378 return c != 0;
9379@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9380 {
9381 unsigned char c;
9382
9383- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9384+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9385+
9386+#ifdef CONFIG_PAX_REFCOUNT
9387+ "jno 0f\n"
9388+ LOCK_PREFIX "subl %2,%0\n"
9389+ "int $4\n0:\n"
9390+ _ASM_EXTABLE(0b, 0b)
9391+#endif
9392+
9393+ "sets %1\n"
9394 : "+m" (v->counter), "=qm" (c)
9395 : "ir" (i) : "memory");
9396 return c;
9397@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9398 goto no_xadd;
9399 #endif
9400 /* Modern 486+ processor */
9401- return i + xadd(&v->counter, i);
9402+ return i + xadd_check_overflow(&v->counter, i);
9403
9404 #ifdef CONFIG_M386
9405 no_xadd: /* Legacy 386 processor */
9406@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9407 }
9408
9409 /**
9410+ * atomic_add_return_unchecked - add integer and return
9411+ * @i: integer value to add
9412+ * @v: pointer of type atomic_unchecked_t
9413+ *
9414+ * Atomically adds @i to @v and returns @i + @v
9415+ */
9416+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9417+{
9418+#ifdef CONFIG_M386
9419+ int __i;
9420+ unsigned long flags;
9421+ if (unlikely(boot_cpu_data.x86 <= 3))
9422+ goto no_xadd;
9423+#endif
9424+ /* Modern 486+ processor */
9425+ return i + xadd(&v->counter, i);
9426+
9427+#ifdef CONFIG_M386
9428+no_xadd: /* Legacy 386 processor */
9429+ raw_local_irq_save(flags);
9430+ __i = atomic_read_unchecked(v);
9431+ atomic_set_unchecked(v, i + __i);
9432+ raw_local_irq_restore(flags);
9433+ return i + __i;
9434+#endif
9435+}
9436+
9437+/**
9438 * atomic_sub_return - subtract integer and return
9439 * @v: pointer of type atomic_t
9440 * @i: integer value to subtract
9441@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9442 }
9443
9444 #define atomic_inc_return(v) (atomic_add_return(1, v))
9445+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9446+{
9447+ return atomic_add_return_unchecked(1, v);
9448+}
9449 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9450
9451 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9452@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9453 return cmpxchg(&v->counter, old, new);
9454 }
9455
9456+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9457+{
9458+ return cmpxchg(&v->counter, old, new);
9459+}
9460+
9461 static inline int atomic_xchg(atomic_t *v, int new)
9462 {
9463 return xchg(&v->counter, new);
9464 }
9465
9466+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9467+{
9468+ return xchg(&v->counter, new);
9469+}
9470+
9471 /**
9472 * __atomic_add_unless - add unless the number is already a given value
9473 * @v: pointer of type atomic_t
9474@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9475 */
9476 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9477 {
9478- int c, old;
9479+ int c, old, new;
9480 c = atomic_read(v);
9481 for (;;) {
9482- if (unlikely(c == (u)))
9483+ if (unlikely(c == u))
9484 break;
9485- old = atomic_cmpxchg((v), c, c + (a));
9486+
9487+ asm volatile("addl %2,%0\n"
9488+
9489+#ifdef CONFIG_PAX_REFCOUNT
9490+ "jno 0f\n"
9491+ "subl %2,%0\n"
9492+ "int $4\n0:\n"
9493+ _ASM_EXTABLE(0b, 0b)
9494+#endif
9495+
9496+ : "=r" (new)
9497+ : "0" (c), "ir" (a));
9498+
9499+ old = atomic_cmpxchg(v, c, new);
9500 if (likely(old == c))
9501 break;
9502 c = old;
9503@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9504 return c;
9505 }
9506
9507+/**
9508+ * atomic_inc_not_zero_hint - increment if not null
9509+ * @v: pointer of type atomic_t
9510+ * @hint: probable value of the atomic before the increment
9511+ *
9512+ * This version of atomic_inc_not_zero() gives a hint of probable
9513+ * value of the atomic. This helps processor to not read the memory
9514+ * before doing the atomic read/modify/write cycle, lowering
9515+ * number of bus transactions on some arches.
9516+ *
9517+ * Returns: 0 if increment was not done, 1 otherwise.
9518+ */
9519+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9520+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9521+{
9522+ int val, c = hint, new;
9523+
9524+ /* sanity test, should be removed by compiler if hint is a constant */
9525+ if (!hint)
9526+ return __atomic_add_unless(v, 1, 0);
9527+
9528+ do {
9529+ asm volatile("incl %0\n"
9530+
9531+#ifdef CONFIG_PAX_REFCOUNT
9532+ "jno 0f\n"
9533+ "decl %0\n"
9534+ "int $4\n0:\n"
9535+ _ASM_EXTABLE(0b, 0b)
9536+#endif
9537+
9538+ : "=r" (new)
9539+ : "0" (c));
9540+
9541+ val = atomic_cmpxchg(v, c, new);
9542+ if (val == c)
9543+ return 1;
9544+ c = val;
9545+ } while (c);
9546+
9547+ return 0;
9548+}
9549
9550 /*
9551 * atomic_dec_if_positive - decrement by 1 if old value positive
9552@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9553 #endif
9554
9555 /* These are x86-specific, used by some header files */
9556-#define atomic_clear_mask(mask, addr) \
9557- asm volatile(LOCK_PREFIX "andl %0,%1" \
9558- : : "r" (~(mask)), "m" (*(addr)) : "memory")
9559+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9560+{
9561+ asm volatile(LOCK_PREFIX "andl %1,%0"
9562+ : "+m" (v->counter)
9563+ : "r" (~(mask))
9564+ : "memory");
9565+}
9566
9567-#define atomic_set_mask(mask, addr) \
9568- asm volatile(LOCK_PREFIX "orl %0,%1" \
9569- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9570- : "memory")
9571+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9572+{
9573+ asm volatile(LOCK_PREFIX "andl %1,%0"
9574+ : "+m" (v->counter)
9575+ : "r" (~(mask))
9576+ : "memory");
9577+}
9578+
9579+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9580+{
9581+ asm volatile(LOCK_PREFIX "orl %1,%0"
9582+ : "+m" (v->counter)
9583+ : "r" (mask)
9584+ : "memory");
9585+}
9586+
9587+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9588+{
9589+ asm volatile(LOCK_PREFIX "orl %1,%0"
9590+ : "+m" (v->counter)
9591+ : "r" (mask)
9592+ : "memory");
9593+}
9594
9595 /* Atomic operations are already serializing on x86 */
9596 #define smp_mb__before_atomic_dec() barrier()
9597diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9598index 1981199..36b9dfb 100644
9599--- a/arch/x86/include/asm/atomic64_32.h
9600+++ b/arch/x86/include/asm/atomic64_32.h
9601@@ -12,6 +12,14 @@ typedef struct {
9602 u64 __aligned(8) counter;
9603 } atomic64_t;
9604
9605+#ifdef CONFIG_PAX_REFCOUNT
9606+typedef struct {
9607+ u64 __aligned(8) counter;
9608+} atomic64_unchecked_t;
9609+#else
9610+typedef atomic64_t atomic64_unchecked_t;
9611+#endif
9612+
9613 #define ATOMIC64_INIT(val) { (val) }
9614
9615 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9616@@ -37,21 +45,31 @@ typedef struct {
9617 ATOMIC64_DECL_ONE(sym##_386)
9618
9619 ATOMIC64_DECL_ONE(add_386);
9620+ATOMIC64_DECL_ONE(add_unchecked_386);
9621 ATOMIC64_DECL_ONE(sub_386);
9622+ATOMIC64_DECL_ONE(sub_unchecked_386);
9623 ATOMIC64_DECL_ONE(inc_386);
9624+ATOMIC64_DECL_ONE(inc_unchecked_386);
9625 ATOMIC64_DECL_ONE(dec_386);
9626+ATOMIC64_DECL_ONE(dec_unchecked_386);
9627 #endif
9628
9629 #define alternative_atomic64(f, out, in...) \
9630 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9631
9632 ATOMIC64_DECL(read);
9633+ATOMIC64_DECL(read_unchecked);
9634 ATOMIC64_DECL(set);
9635+ATOMIC64_DECL(set_unchecked);
9636 ATOMIC64_DECL(xchg);
9637 ATOMIC64_DECL(add_return);
9638+ATOMIC64_DECL(add_return_unchecked);
9639 ATOMIC64_DECL(sub_return);
9640+ATOMIC64_DECL(sub_return_unchecked);
9641 ATOMIC64_DECL(inc_return);
9642+ATOMIC64_DECL(inc_return_unchecked);
9643 ATOMIC64_DECL(dec_return);
9644+ATOMIC64_DECL(dec_return_unchecked);
9645 ATOMIC64_DECL(dec_if_positive);
9646 ATOMIC64_DECL(inc_not_zero);
9647 ATOMIC64_DECL(add_unless);
9648@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9649 }
9650
9651 /**
9652+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9653+ * @p: pointer to type atomic64_unchecked_t
9654+ * @o: expected value
9655+ * @n: new value
9656+ *
9657+ * Atomically sets @v to @n if it was equal to @o and returns
9658+ * the old value.
9659+ */
9660+
9661+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9662+{
9663+ return cmpxchg64(&v->counter, o, n);
9664+}
9665+
9666+/**
9667 * atomic64_xchg - xchg atomic64 variable
9668 * @v: pointer to type atomic64_t
9669 * @n: value to assign
9670@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9671 }
9672
9673 /**
9674+ * atomic64_set_unchecked - set atomic64 variable
9675+ * @v: pointer to type atomic64_unchecked_t
9676+ * @n: value to assign
9677+ *
9678+ * Atomically sets the value of @v to @n.
9679+ */
9680+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9681+{
9682+ unsigned high = (unsigned)(i >> 32);
9683+ unsigned low = (unsigned)i;
9684+ alternative_atomic64(set, /* no output */,
9685+ "S" (v), "b" (low), "c" (high)
9686+ : "eax", "edx", "memory");
9687+}
9688+
9689+/**
9690 * atomic64_read - read atomic64 variable
9691 * @v: pointer to type atomic64_t
9692 *
9693@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9694 }
9695
9696 /**
9697+ * atomic64_read_unchecked - read atomic64 variable
9698+ * @v: pointer to type atomic64_unchecked_t
9699+ *
9700+ * Atomically reads the value of @v and returns it.
9701+ */
9702+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9703+{
9704+ long long r;
9705+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9706+ return r;
9707+ }
9708+
9709+/**
9710 * atomic64_add_return - add and return
9711 * @i: integer value to add
9712 * @v: pointer to type atomic64_t
9713@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9714 return i;
9715 }
9716
9717+/**
9718+ * atomic64_add_return_unchecked - add and return
9719+ * @i: integer value to add
9720+ * @v: pointer to type atomic64_unchecked_t
9721+ *
9722+ * Atomically adds @i to @v and returns @i + *@v
9723+ */
9724+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9725+{
9726+ alternative_atomic64(add_return_unchecked,
9727+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9728+ ASM_NO_INPUT_CLOBBER("memory"));
9729+ return i;
9730+}
9731+
9732 /*
9733 * Other variants with different arithmetic operators:
9734 */
9735@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9736 return a;
9737 }
9738
9739+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9740+{
9741+ long long a;
9742+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
9743+ "S" (v) : "memory", "ecx");
9744+ return a;
9745+}
9746+
9747 static inline long long atomic64_dec_return(atomic64_t *v)
9748 {
9749 long long a;
9750@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9751 }
9752
9753 /**
9754+ * atomic64_add_unchecked - add integer to atomic64 variable
9755+ * @i: integer value to add
9756+ * @v: pointer to type atomic64_unchecked_t
9757+ *
9758+ * Atomically adds @i to @v.
9759+ */
9760+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9761+{
9762+ __alternative_atomic64(add_unchecked, add_return_unchecked,
9763+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9764+ ASM_NO_INPUT_CLOBBER("memory"));
9765+ return i;
9766+}
9767+
9768+/**
9769 * atomic64_sub - subtract the atomic64 variable
9770 * @i: integer value to subtract
9771 * @v: pointer to type atomic64_t
9772diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9773index 0e1cbfc..5623683 100644
9774--- a/arch/x86/include/asm/atomic64_64.h
9775+++ b/arch/x86/include/asm/atomic64_64.h
9776@@ -18,7 +18,19 @@
9777 */
9778 static inline long atomic64_read(const atomic64_t *v)
9779 {
9780- return (*(volatile long *)&(v)->counter);
9781+ return (*(volatile const long *)&(v)->counter);
9782+}
9783+
9784+/**
9785+ * atomic64_read_unchecked - read atomic64 variable
9786+ * @v: pointer of type atomic64_unchecked_t
9787+ *
9788+ * Atomically reads the value of @v.
9789+ * Doesn't imply a read memory barrier.
9790+ */
9791+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9792+{
9793+ return (*(volatile const long *)&(v)->counter);
9794 }
9795
9796 /**
9797@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9798 }
9799
9800 /**
9801+ * atomic64_set_unchecked - set atomic64 variable
9802+ * @v: pointer to type atomic64_unchecked_t
9803+ * @i: required value
9804+ *
9805+ * Atomically sets the value of @v to @i.
9806+ */
9807+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9808+{
9809+ v->counter = i;
9810+}
9811+
9812+/**
9813 * atomic64_add - add integer to atomic64 variable
9814 * @i: integer value to add
9815 * @v: pointer to type atomic64_t
9816@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9817 */
9818 static inline void atomic64_add(long i, atomic64_t *v)
9819 {
9820+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9821+
9822+#ifdef CONFIG_PAX_REFCOUNT
9823+ "jno 0f\n"
9824+ LOCK_PREFIX "subq %1,%0\n"
9825+ "int $4\n0:\n"
9826+ _ASM_EXTABLE(0b, 0b)
9827+#endif
9828+
9829+ : "=m" (v->counter)
9830+ : "er" (i), "m" (v->counter));
9831+}
9832+
9833+/**
9834+ * atomic64_add_unchecked - add integer to atomic64 variable
9835+ * @i: integer value to add
9836+ * @v: pointer to type atomic64_unchecked_t
9837+ *
9838+ * Atomically adds @i to @v.
9839+ */
9840+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9841+{
9842 asm volatile(LOCK_PREFIX "addq %1,%0"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9846 */
9847 static inline void atomic64_sub(long i, atomic64_t *v)
9848 {
9849- asm volatile(LOCK_PREFIX "subq %1,%0"
9850+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9851+
9852+#ifdef CONFIG_PAX_REFCOUNT
9853+ "jno 0f\n"
9854+ LOCK_PREFIX "addq %1,%0\n"
9855+ "int $4\n0:\n"
9856+ _ASM_EXTABLE(0b, 0b)
9857+#endif
9858+
9859+ : "=m" (v->counter)
9860+ : "er" (i), "m" (v->counter));
9861+}
9862+
9863+/**
9864+ * atomic64_sub_unchecked - subtract the atomic64 variable
9865+ * @i: integer value to subtract
9866+ * @v: pointer to type atomic64_unchecked_t
9867+ *
9868+ * Atomically subtracts @i from @v.
9869+ */
9870+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9871+{
9872+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9873 : "=m" (v->counter)
9874 : "er" (i), "m" (v->counter));
9875 }
9876@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9877 {
9878 unsigned char c;
9879
9880- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9881+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9882+
9883+#ifdef CONFIG_PAX_REFCOUNT
9884+ "jno 0f\n"
9885+ LOCK_PREFIX "addq %2,%0\n"
9886+ "int $4\n0:\n"
9887+ _ASM_EXTABLE(0b, 0b)
9888+#endif
9889+
9890+ "sete %1\n"
9891 : "=m" (v->counter), "=qm" (c)
9892 : "er" (i), "m" (v->counter) : "memory");
9893 return c;
9894@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9895 */
9896 static inline void atomic64_inc(atomic64_t *v)
9897 {
9898+ asm volatile(LOCK_PREFIX "incq %0\n"
9899+
9900+#ifdef CONFIG_PAX_REFCOUNT
9901+ "jno 0f\n"
9902+ LOCK_PREFIX "decq %0\n"
9903+ "int $4\n0:\n"
9904+ _ASM_EXTABLE(0b, 0b)
9905+#endif
9906+
9907+ : "=m" (v->counter)
9908+ : "m" (v->counter));
9909+}
9910+
9911+/**
9912+ * atomic64_inc_unchecked - increment atomic64 variable
9913+ * @v: pointer to type atomic64_unchecked_t
9914+ *
9915+ * Atomically increments @v by 1.
9916+ */
9917+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9918+{
9919 asm volatile(LOCK_PREFIX "incq %0"
9920 : "=m" (v->counter)
9921 : "m" (v->counter));
9922@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9923 */
9924 static inline void atomic64_dec(atomic64_t *v)
9925 {
9926- asm volatile(LOCK_PREFIX "decq %0"
9927+ asm volatile(LOCK_PREFIX "decq %0\n"
9928+
9929+#ifdef CONFIG_PAX_REFCOUNT
9930+ "jno 0f\n"
9931+ LOCK_PREFIX "incq %0\n"
9932+ "int $4\n0:\n"
9933+ _ASM_EXTABLE(0b, 0b)
9934+#endif
9935+
9936+ : "=m" (v->counter)
9937+ : "m" (v->counter));
9938+}
9939+
9940+/**
9941+ * atomic64_dec_unchecked - decrement atomic64 variable
9942+ * @v: pointer to type atomic64_t
9943+ *
9944+ * Atomically decrements @v by 1.
9945+ */
9946+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9947+{
9948+ asm volatile(LOCK_PREFIX "decq %0\n"
9949 : "=m" (v->counter)
9950 : "m" (v->counter));
9951 }
9952@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9953 {
9954 unsigned char c;
9955
9956- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9957+ asm volatile(LOCK_PREFIX "decq %0\n"
9958+
9959+#ifdef CONFIG_PAX_REFCOUNT
9960+ "jno 0f\n"
9961+ LOCK_PREFIX "incq %0\n"
9962+ "int $4\n0:\n"
9963+ _ASM_EXTABLE(0b, 0b)
9964+#endif
9965+
9966+ "sete %1\n"
9967 : "=m" (v->counter), "=qm" (c)
9968 : "m" (v->counter) : "memory");
9969 return c != 0;
9970@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9971 {
9972 unsigned char c;
9973
9974- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9975+ asm volatile(LOCK_PREFIX "incq %0\n"
9976+
9977+#ifdef CONFIG_PAX_REFCOUNT
9978+ "jno 0f\n"
9979+ LOCK_PREFIX "decq %0\n"
9980+ "int $4\n0:\n"
9981+ _ASM_EXTABLE(0b, 0b)
9982+#endif
9983+
9984+ "sete %1\n"
9985 : "=m" (v->counter), "=qm" (c)
9986 : "m" (v->counter) : "memory");
9987 return c != 0;
9988@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9989 {
9990 unsigned char c;
9991
9992- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9993+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9994+
9995+#ifdef CONFIG_PAX_REFCOUNT
9996+ "jno 0f\n"
9997+ LOCK_PREFIX "subq %2,%0\n"
9998+ "int $4\n0:\n"
9999+ _ASM_EXTABLE(0b, 0b)
10000+#endif
10001+
10002+ "sets %1\n"
10003 : "=m" (v->counter), "=qm" (c)
10004 : "er" (i), "m" (v->counter) : "memory");
10005 return c;
10006@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10007 */
10008 static inline long atomic64_add_return(long i, atomic64_t *v)
10009 {
10010+ return i + xadd_check_overflow(&v->counter, i);
10011+}
10012+
10013+/**
10014+ * atomic64_add_return_unchecked - add and return
10015+ * @i: integer value to add
10016+ * @v: pointer to type atomic64_unchecked_t
10017+ *
10018+ * Atomically adds @i to @v and returns @i + @v
10019+ */
10020+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10021+{
10022 return i + xadd(&v->counter, i);
10023 }
10024
10025@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10026 }
10027
10028 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10029+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10030+{
10031+ return atomic64_add_return_unchecked(1, v);
10032+}
10033 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10034
10035 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10036@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10037 return cmpxchg(&v->counter, old, new);
10038 }
10039
10040+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10041+{
10042+ return cmpxchg(&v->counter, old, new);
10043+}
10044+
10045 static inline long atomic64_xchg(atomic64_t *v, long new)
10046 {
10047 return xchg(&v->counter, new);
10048@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10049 */
10050 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10051 {
10052- long c, old;
10053+ long c, old, new;
10054 c = atomic64_read(v);
10055 for (;;) {
10056- if (unlikely(c == (u)))
10057+ if (unlikely(c == u))
10058 break;
10059- old = atomic64_cmpxchg((v), c, c + (a));
10060+
10061+ asm volatile("add %2,%0\n"
10062+
10063+#ifdef CONFIG_PAX_REFCOUNT
10064+ "jno 0f\n"
10065+ "sub %2,%0\n"
10066+ "int $4\n0:\n"
10067+ _ASM_EXTABLE(0b, 0b)
10068+#endif
10069+
10070+ : "=r" (new)
10071+ : "0" (c), "ir" (a));
10072+
10073+ old = atomic64_cmpxchg(v, c, new);
10074 if (likely(old == c))
10075 break;
10076 c = old;
10077 }
10078- return c != (u);
10079+ return c != u;
10080 }
10081
10082 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10083diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10084index b97596e..9bd48b06 100644
10085--- a/arch/x86/include/asm/bitops.h
10086+++ b/arch/x86/include/asm/bitops.h
10087@@ -38,7 +38,7 @@
10088 * a mask operation on a byte.
10089 */
10090 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10091-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10092+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10093 #define CONST_MASK(nr) (1 << ((nr) & 7))
10094
10095 /**
10096diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10097index 5e1a2ee..c9f9533 100644
10098--- a/arch/x86/include/asm/boot.h
10099+++ b/arch/x86/include/asm/boot.h
10100@@ -11,10 +11,15 @@
10101 #include <asm/pgtable_types.h>
10102
10103 /* Physical address where kernel should be loaded. */
10104-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10105+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10106 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10107 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10108
10109+#ifndef __ASSEMBLY__
10110+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10111+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10112+#endif
10113+
10114 /* Minimum kernel alignment, as a power of two */
10115 #ifdef CONFIG_X86_64
10116 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10117diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10118index 48f99f1..d78ebf9 100644
10119--- a/arch/x86/include/asm/cache.h
10120+++ b/arch/x86/include/asm/cache.h
10121@@ -5,12 +5,13 @@
10122
10123 /* L1 cache line size */
10124 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10125-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10126+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10127
10128 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10129+#define __read_only __attribute__((__section__(".data..read_only")))
10130
10131 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10132-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10133+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10134
10135 #ifdef CONFIG_X86_VSMP
10136 #ifdef CONFIG_SMP
10137diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10138index 9863ee3..4a1f8e1 100644
10139--- a/arch/x86/include/asm/cacheflush.h
10140+++ b/arch/x86/include/asm/cacheflush.h
10141@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10142 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10143
10144 if (pg_flags == _PGMT_DEFAULT)
10145- return -1;
10146+ return ~0UL;
10147 else if (pg_flags == _PGMT_WC)
10148 return _PAGE_CACHE_WC;
10149 else if (pg_flags == _PGMT_UC_MINUS)
10150diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10151index 46fc474..b02b0f9 100644
10152--- a/arch/x86/include/asm/checksum_32.h
10153+++ b/arch/x86/include/asm/checksum_32.h
10154@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10155 int len, __wsum sum,
10156 int *src_err_ptr, int *dst_err_ptr);
10157
10158+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10159+ int len, __wsum sum,
10160+ int *src_err_ptr, int *dst_err_ptr);
10161+
10162+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10163+ int len, __wsum sum,
10164+ int *src_err_ptr, int *dst_err_ptr);
10165+
10166 /*
10167 * Note: when you get a NULL pointer exception here this means someone
10168 * passed in an incorrect kernel address to one of these functions.
10169@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10170 int *err_ptr)
10171 {
10172 might_sleep();
10173- return csum_partial_copy_generic((__force void *)src, dst,
10174+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10175 len, sum, err_ptr, NULL);
10176 }
10177
10178@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10179 {
10180 might_sleep();
10181 if (access_ok(VERIFY_WRITE, dst, len))
10182- return csum_partial_copy_generic(src, (__force void *)dst,
10183+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10184 len, sum, NULL, err_ptr);
10185
10186 if (len)
10187diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10188index 99480e5..d81165b 100644
10189--- a/arch/x86/include/asm/cmpxchg.h
10190+++ b/arch/x86/include/asm/cmpxchg.h
10191@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10192 __compiletime_error("Bad argument size for cmpxchg");
10193 extern void __xadd_wrong_size(void)
10194 __compiletime_error("Bad argument size for xadd");
10195+extern void __xadd_check_overflow_wrong_size(void)
10196+ __compiletime_error("Bad argument size for xadd_check_overflow");
10197 extern void __add_wrong_size(void)
10198 __compiletime_error("Bad argument size for add");
10199+extern void __add_check_overflow_wrong_size(void)
10200+ __compiletime_error("Bad argument size for add_check_overflow");
10201
10202 /*
10203 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10204@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10205 __ret; \
10206 })
10207
10208+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10209+ ({ \
10210+ __typeof__ (*(ptr)) __ret = (arg); \
10211+ switch (sizeof(*(ptr))) { \
10212+ case __X86_CASE_L: \
10213+ asm volatile (lock #op "l %0, %1\n" \
10214+ "jno 0f\n" \
10215+ "mov %0,%1\n" \
10216+ "int $4\n0:\n" \
10217+ _ASM_EXTABLE(0b, 0b) \
10218+ : "+r" (__ret), "+m" (*(ptr)) \
10219+ : : "memory", "cc"); \
10220+ break; \
10221+ case __X86_CASE_Q: \
10222+ asm volatile (lock #op "q %q0, %1\n" \
10223+ "jno 0f\n" \
10224+ "mov %0,%1\n" \
10225+ "int $4\n0:\n" \
10226+ _ASM_EXTABLE(0b, 0b) \
10227+ : "+r" (__ret), "+m" (*(ptr)) \
10228+ : : "memory", "cc"); \
10229+ break; \
10230+ default: \
10231+ __ ## op ## _check_overflow_wrong_size(); \
10232+ } \
10233+ __ret; \
10234+ })
10235+
10236 /*
10237 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10238 * Since this is generally used to protect other memory information, we
10239@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10240 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10241 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10242
10243+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10244+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10245+
10246 #define __add(ptr, inc, lock) \
10247 ({ \
10248 __typeof__ (*(ptr)) __ret = (inc); \
10249diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10250index f91e80f..7f9bd27 100644
10251--- a/arch/x86/include/asm/cpufeature.h
10252+++ b/arch/x86/include/asm/cpufeature.h
10253@@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10254 ".section .discard,\"aw\",@progbits\n"
10255 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10256 ".previous\n"
10257- ".section .altinstr_replacement,\"ax\"\n"
10258+ ".section .altinstr_replacement,\"a\"\n"
10259 "3: movb $1,%0\n"
10260 "4:\n"
10261 ".previous\n"
10262diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10263index e95822d..a90010e 100644
10264--- a/arch/x86/include/asm/desc.h
10265+++ b/arch/x86/include/asm/desc.h
10266@@ -4,6 +4,7 @@
10267 #include <asm/desc_defs.h>
10268 #include <asm/ldt.h>
10269 #include <asm/mmu.h>
10270+#include <asm/pgtable.h>
10271
10272 #include <linux/smp.h>
10273
10274@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10275
10276 desc->type = (info->read_exec_only ^ 1) << 1;
10277 desc->type |= info->contents << 2;
10278+ desc->type |= info->seg_not_present ^ 1;
10279
10280 desc->s = 1;
10281 desc->dpl = 0x3;
10282@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10283 }
10284
10285 extern struct desc_ptr idt_descr;
10286-extern gate_desc idt_table[];
10287 extern struct desc_ptr nmi_idt_descr;
10288-extern gate_desc nmi_idt_table[];
10289-
10290-struct gdt_page {
10291- struct desc_struct gdt[GDT_ENTRIES];
10292-} __attribute__((aligned(PAGE_SIZE)));
10293-
10294-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10295+extern gate_desc idt_table[256];
10296+extern gate_desc nmi_idt_table[256];
10297
10298+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10299 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10300 {
10301- return per_cpu(gdt_page, cpu).gdt;
10302+ return cpu_gdt_table[cpu];
10303 }
10304
10305 #ifdef CONFIG_X86_64
10306@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10307 unsigned long base, unsigned dpl, unsigned flags,
10308 unsigned short seg)
10309 {
10310- gate->a = (seg << 16) | (base & 0xffff);
10311- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10312+ gate->gate.offset_low = base;
10313+ gate->gate.seg = seg;
10314+ gate->gate.reserved = 0;
10315+ gate->gate.type = type;
10316+ gate->gate.s = 0;
10317+ gate->gate.dpl = dpl;
10318+ gate->gate.p = 1;
10319+ gate->gate.offset_high = base >> 16;
10320 }
10321
10322 #endif
10323@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10324
10325 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10326 {
10327+ pax_open_kernel();
10328 memcpy(&idt[entry], gate, sizeof(*gate));
10329+ pax_close_kernel();
10330 }
10331
10332 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10333 {
10334+ pax_open_kernel();
10335 memcpy(&ldt[entry], desc, 8);
10336+ pax_close_kernel();
10337 }
10338
10339 static inline void
10340@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10341 default: size = sizeof(*gdt); break;
10342 }
10343
10344+ pax_open_kernel();
10345 memcpy(&gdt[entry], desc, size);
10346+ pax_close_kernel();
10347 }
10348
10349 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10350@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10351
10352 static inline void native_load_tr_desc(void)
10353 {
10354+ pax_open_kernel();
10355 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10356+ pax_close_kernel();
10357 }
10358
10359 static inline void native_load_gdt(const struct desc_ptr *dtr)
10360@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10361 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10362 unsigned int i;
10363
10364+ pax_open_kernel();
10365 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10366 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10367+ pax_close_kernel();
10368 }
10369
10370 #define _LDT_empty(info) \
10371@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10372 }
10373
10374 #ifdef CONFIG_X86_64
10375-static inline void set_nmi_gate(int gate, void *addr)
10376+static inline void set_nmi_gate(int gate, const void *addr)
10377 {
10378 gate_desc s;
10379
10380@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10381 }
10382 #endif
10383
10384-static inline void _set_gate(int gate, unsigned type, void *addr,
10385+static inline void _set_gate(int gate, unsigned type, const void *addr,
10386 unsigned dpl, unsigned ist, unsigned seg)
10387 {
10388 gate_desc s;
10389@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10390 * Pentium F0 0F bugfix can have resulted in the mapped
10391 * IDT being write-protected.
10392 */
10393-static inline void set_intr_gate(unsigned int n, void *addr)
10394+static inline void set_intr_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10398@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10399 /*
10400 * This routine sets up an interrupt gate at directory privilege level 3.
10401 */
10402-static inline void set_system_intr_gate(unsigned int n, void *addr)
10403+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10404 {
10405 BUG_ON((unsigned)n > 0xFF);
10406 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10407 }
10408
10409-static inline void set_system_trap_gate(unsigned int n, void *addr)
10410+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10411 {
10412 BUG_ON((unsigned)n > 0xFF);
10413 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10414 }
10415
10416-static inline void set_trap_gate(unsigned int n, void *addr)
10417+static inline void set_trap_gate(unsigned int n, const void *addr)
10418 {
10419 BUG_ON((unsigned)n > 0xFF);
10420 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10421@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10422 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10423 {
10424 BUG_ON((unsigned)n > 0xFF);
10425- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10426+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10427 }
10428
10429-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10430+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10431 {
10432 BUG_ON((unsigned)n > 0xFF);
10433 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10434 }
10435
10436-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10437+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10438 {
10439 BUG_ON((unsigned)n > 0xFF);
10440 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10441 }
10442
10443+#ifdef CONFIG_X86_32
10444+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10445+{
10446+ struct desc_struct d;
10447+
10448+ if (likely(limit))
10449+ limit = (limit - 1UL) >> PAGE_SHIFT;
10450+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10451+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10452+}
10453+#endif
10454+
10455 #endif /* _ASM_X86_DESC_H */
10456diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10457index 278441f..b95a174 100644
10458--- a/arch/x86/include/asm/desc_defs.h
10459+++ b/arch/x86/include/asm/desc_defs.h
10460@@ -31,6 +31,12 @@ struct desc_struct {
10461 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10462 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10463 };
10464+ struct {
10465+ u16 offset_low;
10466+ u16 seg;
10467+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10468+ unsigned offset_high: 16;
10469+ } gate;
10470 };
10471 } __attribute__((packed));
10472
10473diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10474index 3778256..c5d4fce 100644
10475--- a/arch/x86/include/asm/e820.h
10476+++ b/arch/x86/include/asm/e820.h
10477@@ -69,7 +69,7 @@ struct e820map {
10478 #define ISA_START_ADDRESS 0xa0000
10479 #define ISA_END_ADDRESS 0x100000
10480
10481-#define BIOS_BEGIN 0x000a0000
10482+#define BIOS_BEGIN 0x000c0000
10483 #define BIOS_END 0x00100000
10484
10485 #define BIOS_ROM_BASE 0xffe00000
10486diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10487index 5939f44..f8845f6 100644
10488--- a/arch/x86/include/asm/elf.h
10489+++ b/arch/x86/include/asm/elf.h
10490@@ -243,7 +243,25 @@ extern int force_personality32;
10491 the loader. We need to make sure that it is out of the way of the program
10492 that it will "exec", and that there is sufficient room for the brk. */
10493
10494+#ifdef CONFIG_PAX_SEGMEXEC
10495+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10496+#else
10497 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10498+#endif
10499+
10500+#ifdef CONFIG_PAX_ASLR
10501+#ifdef CONFIG_X86_32
10502+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10503+
10504+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10505+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10506+#else
10507+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10508+
10509+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10510+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10511+#endif
10512+#endif
10513
10514 /* This yields a mask that user programs can use to figure out what
10515 instruction set this CPU supports. This could be done in user space,
10516@@ -296,16 +314,12 @@ do { \
10517
10518 #define ARCH_DLINFO \
10519 do { \
10520- if (vdso_enabled) \
10521- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10522- (unsigned long)current->mm->context.vdso); \
10523+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10524 } while (0)
10525
10526 #define ARCH_DLINFO_X32 \
10527 do { \
10528- if (vdso_enabled) \
10529- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10530- (unsigned long)current->mm->context.vdso); \
10531+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10532 } while (0)
10533
10534 #define AT_SYSINFO 32
10535@@ -320,7 +334,7 @@ else \
10536
10537 #endif /* !CONFIG_X86_32 */
10538
10539-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10540+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10541
10542 #define VDSO_ENTRY \
10543 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10544@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10545 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10546 #define compat_arch_setup_additional_pages syscall32_setup_pages
10547
10548-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10549-#define arch_randomize_brk arch_randomize_brk
10550-
10551 /*
10552 * True on X86_32 or when emulating IA32 on X86_64
10553 */
10554diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10555index cc70c1c..d96d011 100644
10556--- a/arch/x86/include/asm/emergency-restart.h
10557+++ b/arch/x86/include/asm/emergency-restart.h
10558@@ -15,6 +15,6 @@ enum reboot_type {
10559
10560 extern enum reboot_type reboot_type;
10561
10562-extern void machine_emergency_restart(void);
10563+extern void machine_emergency_restart(void) __noreturn;
10564
10565 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10566diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10567index 4fa8815..71b121a 100644
10568--- a/arch/x86/include/asm/fpu-internal.h
10569+++ b/arch/x86/include/asm/fpu-internal.h
10570@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10571 {
10572 int err;
10573
10574+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10575+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10576+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10577+#endif
10578+
10579 /* See comment in fxsave() below. */
10580 #ifdef CONFIG_AS_FXSAVEQ
10581 asm volatile("1: fxrstorq %[fx]\n\t"
10582@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10583 {
10584 int err;
10585
10586+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10587+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10588+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10589+#endif
10590+
10591 /*
10592 * Clear the bytes not touched by the fxsave and reserved
10593 * for the SW usage.
10594@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10595 "emms\n\t" /* clear stack tags */
10596 "fildl %P[addr]", /* set F?P to defined value */
10597 X86_FEATURE_FXSAVE_LEAK,
10598- [addr] "m" (tsk->thread.fpu.has_fpu));
10599+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10600
10601 return fpu_restore_checking(&tsk->thread.fpu);
10602 }
10603diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10604index 71ecbcb..bac10b7 100644
10605--- a/arch/x86/include/asm/futex.h
10606+++ b/arch/x86/include/asm/futex.h
10607@@ -11,16 +11,18 @@
10608 #include <asm/processor.h>
10609
10610 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10611+ typecheck(u32 __user *, uaddr); \
10612 asm volatile("1:\t" insn "\n" \
10613 "2:\t.section .fixup,\"ax\"\n" \
10614 "3:\tmov\t%3, %1\n" \
10615 "\tjmp\t2b\n" \
10616 "\t.previous\n" \
10617 _ASM_EXTABLE(1b, 3b) \
10618- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10619+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10620 : "i" (-EFAULT), "0" (oparg), "1" (0))
10621
10622 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10623+ typecheck(u32 __user *, uaddr); \
10624 asm volatile("1:\tmovl %2, %0\n" \
10625 "\tmovl\t%0, %3\n" \
10626 "\t" insn "\n" \
10627@@ -33,7 +35,7 @@
10628 _ASM_EXTABLE(1b, 4b) \
10629 _ASM_EXTABLE(2b, 4b) \
10630 : "=&a" (oldval), "=&r" (ret), \
10631- "+m" (*uaddr), "=&r" (tem) \
10632+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10633 : "r" (oparg), "i" (-EFAULT), "1" (0))
10634
10635 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10636@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10637
10638 switch (op) {
10639 case FUTEX_OP_SET:
10640- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10641+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10642 break;
10643 case FUTEX_OP_ADD:
10644- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10645+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10646 uaddr, oparg);
10647 break;
10648 case FUTEX_OP_OR:
10649@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10650 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10651 return -EFAULT;
10652
10653- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10654+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10655 "2:\t.section .fixup, \"ax\"\n"
10656 "3:\tmov %3, %0\n"
10657 "\tjmp 2b\n"
10658 "\t.previous\n"
10659 _ASM_EXTABLE(1b, 3b)
10660- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10661+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10662 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10663 : "memory"
10664 );
10665diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10666index eb92a6e..b98b2f4 100644
10667--- a/arch/x86/include/asm/hw_irq.h
10668+++ b/arch/x86/include/asm/hw_irq.h
10669@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10670 extern void enable_IO_APIC(void);
10671
10672 /* Statistics */
10673-extern atomic_t irq_err_count;
10674-extern atomic_t irq_mis_count;
10675+extern atomic_unchecked_t irq_err_count;
10676+extern atomic_unchecked_t irq_mis_count;
10677
10678 /* EISA */
10679 extern void eisa_set_level_irq(unsigned int irq);
10680diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10681index d8e8eef..99f81ae 100644
10682--- a/arch/x86/include/asm/io.h
10683+++ b/arch/x86/include/asm/io.h
10684@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10685
10686 #include <linux/vmalloc.h>
10687
10688+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10689+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10690+{
10691+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10692+}
10693+
10694+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10695+{
10696+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10697+}
10698+
10699 /*
10700 * Convert a virtual cached pointer to an uncached pointer
10701 */
10702diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10703index bba3cf8..06bc8da 100644
10704--- a/arch/x86/include/asm/irqflags.h
10705+++ b/arch/x86/include/asm/irqflags.h
10706@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10707 sti; \
10708 sysexit
10709
10710+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10711+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10712+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10713+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10714+
10715 #else
10716 #define INTERRUPT_RETURN iret
10717 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10718diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10719index 5478825..839e88c 100644
10720--- a/arch/x86/include/asm/kprobes.h
10721+++ b/arch/x86/include/asm/kprobes.h
10722@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10723 #define RELATIVEJUMP_SIZE 5
10724 #define RELATIVECALL_OPCODE 0xe8
10725 #define RELATIVE_ADDR_SIZE 4
10726-#define MAX_STACK_SIZE 64
10727-#define MIN_STACK_SIZE(ADDR) \
10728- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10729- THREAD_SIZE - (unsigned long)(ADDR))) \
10730- ? (MAX_STACK_SIZE) \
10731- : (((unsigned long)current_thread_info()) + \
10732- THREAD_SIZE - (unsigned long)(ADDR)))
10733+#define MAX_STACK_SIZE 64UL
10734+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10735
10736 #define flush_insn_slot(p) do { } while (0)
10737
10738diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10739index e216ba0..453f6ec 100644
10740--- a/arch/x86/include/asm/kvm_host.h
10741+++ b/arch/x86/include/asm/kvm_host.h
10742@@ -679,7 +679,7 @@ struct kvm_x86_ops {
10743 int (*check_intercept)(struct kvm_vcpu *vcpu,
10744 struct x86_instruction_info *info,
10745 enum x86_intercept_stage stage);
10746-};
10747+} __do_const;
10748
10749 struct kvm_arch_async_pf {
10750 u32 token;
10751diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10752index c8bed0d..e5721fa 100644
10753--- a/arch/x86/include/asm/local.h
10754+++ b/arch/x86/include/asm/local.h
10755@@ -17,26 +17,58 @@ typedef struct {
10756
10757 static inline void local_inc(local_t *l)
10758 {
10759- asm volatile(_ASM_INC "%0"
10760+ asm volatile(_ASM_INC "%0\n"
10761+
10762+#ifdef CONFIG_PAX_REFCOUNT
10763+ "jno 0f\n"
10764+ _ASM_DEC "%0\n"
10765+ "int $4\n0:\n"
10766+ _ASM_EXTABLE(0b, 0b)
10767+#endif
10768+
10769 : "+m" (l->a.counter));
10770 }
10771
10772 static inline void local_dec(local_t *l)
10773 {
10774- asm volatile(_ASM_DEC "%0"
10775+ asm volatile(_ASM_DEC "%0\n"
10776+
10777+#ifdef CONFIG_PAX_REFCOUNT
10778+ "jno 0f\n"
10779+ _ASM_INC "%0\n"
10780+ "int $4\n0:\n"
10781+ _ASM_EXTABLE(0b, 0b)
10782+#endif
10783+
10784 : "+m" (l->a.counter));
10785 }
10786
10787 static inline void local_add(long i, local_t *l)
10788 {
10789- asm volatile(_ASM_ADD "%1,%0"
10790+ asm volatile(_ASM_ADD "%1,%0\n"
10791+
10792+#ifdef CONFIG_PAX_REFCOUNT
10793+ "jno 0f\n"
10794+ _ASM_SUB "%1,%0\n"
10795+ "int $4\n0:\n"
10796+ _ASM_EXTABLE(0b, 0b)
10797+#endif
10798+
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802
10803 static inline void local_sub(long i, local_t *l)
10804 {
10805- asm volatile(_ASM_SUB "%1,%0"
10806+ asm volatile(_ASM_SUB "%1,%0\n"
10807+
10808+#ifdef CONFIG_PAX_REFCOUNT
10809+ "jno 0f\n"
10810+ _ASM_ADD "%1,%0\n"
10811+ "int $4\n0:\n"
10812+ _ASM_EXTABLE(0b, 0b)
10813+#endif
10814+
10815 : "+m" (l->a.counter)
10816 : "ir" (i));
10817 }
10818@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10819 {
10820 unsigned char c;
10821
10822- asm volatile(_ASM_SUB "%2,%0; sete %1"
10823+ asm volatile(_ASM_SUB "%2,%0\n"
10824+
10825+#ifdef CONFIG_PAX_REFCOUNT
10826+ "jno 0f\n"
10827+ _ASM_ADD "%2,%0\n"
10828+ "int $4\n0:\n"
10829+ _ASM_EXTABLE(0b, 0b)
10830+#endif
10831+
10832+ "sete %1\n"
10833 : "+m" (l->a.counter), "=qm" (c)
10834 : "ir" (i) : "memory");
10835 return c;
10836@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10837 {
10838 unsigned char c;
10839
10840- asm volatile(_ASM_DEC "%0; sete %1"
10841+ asm volatile(_ASM_DEC "%0\n"
10842+
10843+#ifdef CONFIG_PAX_REFCOUNT
10844+ "jno 0f\n"
10845+ _ASM_INC "%0\n"
10846+ "int $4\n0:\n"
10847+ _ASM_EXTABLE(0b, 0b)
10848+#endif
10849+
10850+ "sete %1\n"
10851 : "+m" (l->a.counter), "=qm" (c)
10852 : : "memory");
10853 return c != 0;
10854@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10855 {
10856 unsigned char c;
10857
10858- asm volatile(_ASM_INC "%0; sete %1"
10859+ asm volatile(_ASM_INC "%0\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
10862+ "jno 0f\n"
10863+ _ASM_DEC "%0\n"
10864+ "int $4\n0:\n"
10865+ _ASM_EXTABLE(0b, 0b)
10866+#endif
10867+
10868+ "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : : "memory");
10871 return c != 0;
10872@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10873 {
10874 unsigned char c;
10875
10876- asm volatile(_ASM_ADD "%2,%0; sets %1"
10877+ asm volatile(_ASM_ADD "%2,%0\n"
10878+
10879+#ifdef CONFIG_PAX_REFCOUNT
10880+ "jno 0f\n"
10881+ _ASM_SUB "%2,%0\n"
10882+ "int $4\n0:\n"
10883+ _ASM_EXTABLE(0b, 0b)
10884+#endif
10885+
10886+ "sets %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : "ir" (i) : "memory");
10889 return c;
10890@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10891 #endif
10892 /* Modern 486+ processor */
10893 __i = i;
10894- asm volatile(_ASM_XADD "%0, %1;"
10895+ asm volatile(_ASM_XADD "%0, %1\n"
10896+
10897+#ifdef CONFIG_PAX_REFCOUNT
10898+ "jno 0f\n"
10899+ _ASM_MOV "%0,%1\n"
10900+ "int $4\n0:\n"
10901+ _ASM_EXTABLE(0b, 0b)
10902+#endif
10903+
10904 : "+r" (i), "+m" (l->a.counter)
10905 : : "memory");
10906 return i + __i;
10907diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10908index 593e51d..fa69c9a 100644
10909--- a/arch/x86/include/asm/mman.h
10910+++ b/arch/x86/include/asm/mman.h
10911@@ -5,4 +5,14 @@
10912
10913 #include <asm-generic/mman.h>
10914
10915+#ifdef __KERNEL__
10916+#ifndef __ASSEMBLY__
10917+#ifdef CONFIG_X86_32
10918+#define arch_mmap_check i386_mmap_check
10919+int i386_mmap_check(unsigned long addr, unsigned long len,
10920+ unsigned long flags);
10921+#endif
10922+#endif
10923+#endif
10924+
10925 #endif /* _ASM_X86_MMAN_H */
10926diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10927index 5f55e69..e20bfb1 100644
10928--- a/arch/x86/include/asm/mmu.h
10929+++ b/arch/x86/include/asm/mmu.h
10930@@ -9,7 +9,7 @@
10931 * we put the segment information here.
10932 */
10933 typedef struct {
10934- void *ldt;
10935+ struct desc_struct *ldt;
10936 int size;
10937
10938 #ifdef CONFIG_X86_64
10939@@ -18,7 +18,19 @@ typedef struct {
10940 #endif
10941
10942 struct mutex lock;
10943- void *vdso;
10944+ unsigned long vdso;
10945+
10946+#ifdef CONFIG_X86_32
10947+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10948+ unsigned long user_cs_base;
10949+ unsigned long user_cs_limit;
10950+
10951+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10952+ cpumask_t cpu_user_cs_mask;
10953+#endif
10954+
10955+#endif
10956+#endif
10957 } mm_context_t;
10958
10959 #ifdef CONFIG_SMP
10960diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10961index 6902152..da4283a 100644
10962--- a/arch/x86/include/asm/mmu_context.h
10963+++ b/arch/x86/include/asm/mmu_context.h
10964@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10965
10966 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10967 {
10968+
10969+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10970+ unsigned int i;
10971+ pgd_t *pgd;
10972+
10973+ pax_open_kernel();
10974+ pgd = get_cpu_pgd(smp_processor_id());
10975+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10976+ set_pgd_batched(pgd+i, native_make_pgd(0));
10977+ pax_close_kernel();
10978+#endif
10979+
10980 #ifdef CONFIG_SMP
10981 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10982 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10983@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10984 struct task_struct *tsk)
10985 {
10986 unsigned cpu = smp_processor_id();
10987+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10988+ int tlbstate = TLBSTATE_OK;
10989+#endif
10990
10991 if (likely(prev != next)) {
10992 #ifdef CONFIG_SMP
10993+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10994+ tlbstate = percpu_read(cpu_tlbstate.state);
10995+#endif
10996 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10997 percpu_write(cpu_tlbstate.active_mm, next);
10998 #endif
10999 cpumask_set_cpu(cpu, mm_cpumask(next));
11000
11001 /* Re-load page tables */
11002+#ifdef CONFIG_PAX_PER_CPU_PGD
11003+ pax_open_kernel();
11004+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11005+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11006+ pax_close_kernel();
11007+ load_cr3(get_cpu_pgd(cpu));
11008+#else
11009 load_cr3(next->pgd);
11010+#endif
11011
11012 /* stop flush ipis for the previous mm */
11013 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11014@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11015 */
11016 if (unlikely(prev->context.ldt != next->context.ldt))
11017 load_LDT_nolock(&next->context);
11018- }
11019+
11020+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11021+ if (!(__supported_pte_mask & _PAGE_NX)) {
11022+ smp_mb__before_clear_bit();
11023+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11024+ smp_mb__after_clear_bit();
11025+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11026+ }
11027+#endif
11028+
11029+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11030+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11031+ prev->context.user_cs_limit != next->context.user_cs_limit))
11032+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11033 #ifdef CONFIG_SMP
11034+ else if (unlikely(tlbstate != TLBSTATE_OK))
11035+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11036+#endif
11037+#endif
11038+
11039+ }
11040 else {
11041+
11042+#ifdef CONFIG_PAX_PER_CPU_PGD
11043+ pax_open_kernel();
11044+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11045+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11046+ pax_close_kernel();
11047+ load_cr3(get_cpu_pgd(cpu));
11048+#endif
11049+
11050+#ifdef CONFIG_SMP
11051 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11052 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11053
11054@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11055 * tlb flush IPI delivery. We must reload CR3
11056 * to make sure to use no freed page tables.
11057 */
11058+
11059+#ifndef CONFIG_PAX_PER_CPU_PGD
11060 load_cr3(next->pgd);
11061+#endif
11062+
11063 load_LDT_nolock(&next->context);
11064+
11065+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11066+ if (!(__supported_pte_mask & _PAGE_NX))
11067+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11068+#endif
11069+
11070+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11071+#ifdef CONFIG_PAX_PAGEEXEC
11072+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11073+#endif
11074+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11075+#endif
11076+
11077 }
11078+#endif
11079 }
11080-#endif
11081 }
11082
11083 #define activate_mm(prev, next) \
11084diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11085index 9eae775..c914fea 100644
11086--- a/arch/x86/include/asm/module.h
11087+++ b/arch/x86/include/asm/module.h
11088@@ -5,6 +5,7 @@
11089
11090 #ifdef CONFIG_X86_64
11091 /* X86_64 does not define MODULE_PROC_FAMILY */
11092+#define MODULE_PROC_FAMILY ""
11093 #elif defined CONFIG_M386
11094 #define MODULE_PROC_FAMILY "386 "
11095 #elif defined CONFIG_M486
11096@@ -59,8 +60,20 @@
11097 #error unknown processor family
11098 #endif
11099
11100-#ifdef CONFIG_X86_32
11101-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11102+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11103+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11104+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11105+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11106+#else
11107+#define MODULE_PAX_KERNEXEC ""
11108 #endif
11109
11110+#ifdef CONFIG_PAX_MEMORY_UDEREF
11111+#define MODULE_PAX_UDEREF "UDEREF "
11112+#else
11113+#define MODULE_PAX_UDEREF ""
11114+#endif
11115+
11116+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11117+
11118 #endif /* _ASM_X86_MODULE_H */
11119diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11120index 7639dbf..e08a58c 100644
11121--- a/arch/x86/include/asm/page_64_types.h
11122+++ b/arch/x86/include/asm/page_64_types.h
11123@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11124
11125 /* duplicated to the one in bootmem.h */
11126 extern unsigned long max_pfn;
11127-extern unsigned long phys_base;
11128+extern const unsigned long phys_base;
11129
11130 extern unsigned long __phys_addr(unsigned long);
11131 #define __phys_reloc_hide(x) (x)
11132diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11133index aa0f913..0c5bc6a 100644
11134--- a/arch/x86/include/asm/paravirt.h
11135+++ b/arch/x86/include/asm/paravirt.h
11136@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11137 val);
11138 }
11139
11140+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11141+{
11142+ pgdval_t val = native_pgd_val(pgd);
11143+
11144+ if (sizeof(pgdval_t) > sizeof(long))
11145+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11146+ val, (u64)val >> 32);
11147+ else
11148+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11149+ val);
11150+}
11151+
11152 static inline void pgd_clear(pgd_t *pgdp)
11153 {
11154 set_pgd(pgdp, __pgd(0));
11155@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11156 pv_mmu_ops.set_fixmap(idx, phys, flags);
11157 }
11158
11159+#ifdef CONFIG_PAX_KERNEXEC
11160+static inline unsigned long pax_open_kernel(void)
11161+{
11162+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11163+}
11164+
11165+static inline unsigned long pax_close_kernel(void)
11166+{
11167+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11168+}
11169+#else
11170+static inline unsigned long pax_open_kernel(void) { return 0; }
11171+static inline unsigned long pax_close_kernel(void) { return 0; }
11172+#endif
11173+
11174 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11175
11176 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11177@@ -965,7 +992,7 @@ extern void default_banner(void);
11178
11179 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11180 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11181-#define PARA_INDIRECT(addr) *%cs:addr
11182+#define PARA_INDIRECT(addr) *%ss:addr
11183 #endif
11184
11185 #define INTERRUPT_RETURN \
11186@@ -1042,6 +1069,21 @@ extern void default_banner(void);
11187 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11188 CLBR_NONE, \
11189 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11190+
11191+#define GET_CR0_INTO_RDI \
11192+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11193+ mov %rax,%rdi
11194+
11195+#define SET_RDI_INTO_CR0 \
11196+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11197+
11198+#define GET_CR3_INTO_RDI \
11199+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11200+ mov %rax,%rdi
11201+
11202+#define SET_RDI_INTO_CR3 \
11203+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11204+
11205 #endif /* CONFIG_X86_32 */
11206
11207 #endif /* __ASSEMBLY__ */
11208diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11209index 8e8b9a4..f07d725 100644
11210--- a/arch/x86/include/asm/paravirt_types.h
11211+++ b/arch/x86/include/asm/paravirt_types.h
11212@@ -84,20 +84,20 @@ struct pv_init_ops {
11213 */
11214 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11215 unsigned long addr, unsigned len);
11216-};
11217+} __no_const;
11218
11219
11220 struct pv_lazy_ops {
11221 /* Set deferred update mode, used for batching operations. */
11222 void (*enter)(void);
11223 void (*leave)(void);
11224-};
11225+} __no_const;
11226
11227 struct pv_time_ops {
11228 unsigned long long (*sched_clock)(void);
11229 unsigned long long (*steal_clock)(int cpu);
11230 unsigned long (*get_tsc_khz)(void);
11231-};
11232+} __no_const;
11233
11234 struct pv_cpu_ops {
11235 /* hooks for various privileged instructions */
11236@@ -193,7 +193,7 @@ struct pv_cpu_ops {
11237
11238 void (*start_context_switch)(struct task_struct *prev);
11239 void (*end_context_switch)(struct task_struct *next);
11240-};
11241+} __no_const;
11242
11243 struct pv_irq_ops {
11244 /*
11245@@ -224,7 +224,7 @@ struct pv_apic_ops {
11246 unsigned long start_eip,
11247 unsigned long start_esp);
11248 #endif
11249-};
11250+} __no_const;
11251
11252 struct pv_mmu_ops {
11253 unsigned long (*read_cr2)(void);
11254@@ -313,6 +313,7 @@ struct pv_mmu_ops {
11255 struct paravirt_callee_save make_pud;
11256
11257 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11258+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11259 #endif /* PAGETABLE_LEVELS == 4 */
11260 #endif /* PAGETABLE_LEVELS >= 3 */
11261
11262@@ -324,6 +325,12 @@ struct pv_mmu_ops {
11263 an mfn. We can tell which is which from the index. */
11264 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11265 phys_addr_t phys, pgprot_t flags);
11266+
11267+#ifdef CONFIG_PAX_KERNEXEC
11268+ unsigned long (*pax_open_kernel)(void);
11269+ unsigned long (*pax_close_kernel)(void);
11270+#endif
11271+
11272 };
11273
11274 struct arch_spinlock;
11275@@ -334,7 +341,7 @@ struct pv_lock_ops {
11276 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11277 int (*spin_trylock)(struct arch_spinlock *lock);
11278 void (*spin_unlock)(struct arch_spinlock *lock);
11279-};
11280+} __no_const;
11281
11282 /* This contains all the paravirt structures: we get a convenient
11283 * number for each function using the offset which we use to indicate
11284diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11285index b4389a4..7024269 100644
11286--- a/arch/x86/include/asm/pgalloc.h
11287+++ b/arch/x86/include/asm/pgalloc.h
11288@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11289 pmd_t *pmd, pte_t *pte)
11290 {
11291 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11292+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11293+}
11294+
11295+static inline void pmd_populate_user(struct mm_struct *mm,
11296+ pmd_t *pmd, pte_t *pte)
11297+{
11298+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11299 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11300 }
11301
11302@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11303
11304 #ifdef CONFIG_X86_PAE
11305 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11306+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11307+{
11308+ pud_populate(mm, pudp, pmd);
11309+}
11310 #else /* !CONFIG_X86_PAE */
11311 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11312 {
11313 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11314 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11315 }
11316+
11317+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11318+{
11319+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11320+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11321+}
11322 #endif /* CONFIG_X86_PAE */
11323
11324 #if PAGETABLE_LEVELS > 3
11325@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11326 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11327 }
11328
11329+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11330+{
11331+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11332+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11333+}
11334+
11335 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11336 {
11337 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11338diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11339index 98391db..8f6984e 100644
11340--- a/arch/x86/include/asm/pgtable-2level.h
11341+++ b/arch/x86/include/asm/pgtable-2level.h
11342@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11343
11344 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11345 {
11346+ pax_open_kernel();
11347 *pmdp = pmd;
11348+ pax_close_kernel();
11349 }
11350
11351 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11352diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11353index cb00ccc..17e9054 100644
11354--- a/arch/x86/include/asm/pgtable-3level.h
11355+++ b/arch/x86/include/asm/pgtable-3level.h
11356@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11357
11358 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11359 {
11360+ pax_open_kernel();
11361 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11362+ pax_close_kernel();
11363 }
11364
11365 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11366 {
11367+ pax_open_kernel();
11368 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11369+ pax_close_kernel();
11370 }
11371
11372 /*
11373diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11374index 49afb3f..91a8c63 100644
11375--- a/arch/x86/include/asm/pgtable.h
11376+++ b/arch/x86/include/asm/pgtable.h
11377@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11378
11379 #ifndef __PAGETABLE_PUD_FOLDED
11380 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11381+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11382 #define pgd_clear(pgd) native_pgd_clear(pgd)
11383 #endif
11384
11385@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11386
11387 #define arch_end_context_switch(prev) do {} while(0)
11388
11389+#define pax_open_kernel() native_pax_open_kernel()
11390+#define pax_close_kernel() native_pax_close_kernel()
11391 #endif /* CONFIG_PARAVIRT */
11392
11393+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11394+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11395+
11396+#ifdef CONFIG_PAX_KERNEXEC
11397+static inline unsigned long native_pax_open_kernel(void)
11398+{
11399+ unsigned long cr0;
11400+
11401+ preempt_disable();
11402+ barrier();
11403+ cr0 = read_cr0() ^ X86_CR0_WP;
11404+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11405+ write_cr0(cr0);
11406+ return cr0 ^ X86_CR0_WP;
11407+}
11408+
11409+static inline unsigned long native_pax_close_kernel(void)
11410+{
11411+ unsigned long cr0;
11412+
11413+ cr0 = read_cr0() ^ X86_CR0_WP;
11414+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11415+ write_cr0(cr0);
11416+ barrier();
11417+ preempt_enable_no_resched();
11418+ return cr0 ^ X86_CR0_WP;
11419+}
11420+#else
11421+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11422+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11423+#endif
11424+
11425 /*
11426 * The following only work if pte_present() is true.
11427 * Undefined behaviour if not..
11428 */
11429+static inline int pte_user(pte_t pte)
11430+{
11431+ return pte_val(pte) & _PAGE_USER;
11432+}
11433+
11434 static inline int pte_dirty(pte_t pte)
11435 {
11436 return pte_flags(pte) & _PAGE_DIRTY;
11437@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11438 return pte_clear_flags(pte, _PAGE_RW);
11439 }
11440
11441+static inline pte_t pte_mkread(pte_t pte)
11442+{
11443+ return __pte(pte_val(pte) | _PAGE_USER);
11444+}
11445+
11446 static inline pte_t pte_mkexec(pte_t pte)
11447 {
11448- return pte_clear_flags(pte, _PAGE_NX);
11449+#ifdef CONFIG_X86_PAE
11450+ if (__supported_pte_mask & _PAGE_NX)
11451+ return pte_clear_flags(pte, _PAGE_NX);
11452+ else
11453+#endif
11454+ return pte_set_flags(pte, _PAGE_USER);
11455+}
11456+
11457+static inline pte_t pte_exprotect(pte_t pte)
11458+{
11459+#ifdef CONFIG_X86_PAE
11460+ if (__supported_pte_mask & _PAGE_NX)
11461+ return pte_set_flags(pte, _PAGE_NX);
11462+ else
11463+#endif
11464+ return pte_clear_flags(pte, _PAGE_USER);
11465 }
11466
11467 static inline pte_t pte_mkdirty(pte_t pte)
11468@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11469 #endif
11470
11471 #ifndef __ASSEMBLY__
11472+
11473+#ifdef CONFIG_PAX_PER_CPU_PGD
11474+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11475+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11476+{
11477+ return cpu_pgd[cpu];
11478+}
11479+#endif
11480+
11481 #include <linux/mm_types.h>
11482
11483 static inline int pte_none(pte_t pte)
11484@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11485
11486 static inline int pgd_bad(pgd_t pgd)
11487 {
11488- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11489+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11490 }
11491
11492 static inline int pgd_none(pgd_t pgd)
11493@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11494 * pgd_offset() returns a (pgd_t *)
11495 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11496 */
11497-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11498+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11499+
11500+#ifdef CONFIG_PAX_PER_CPU_PGD
11501+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11502+#endif
11503+
11504 /*
11505 * a shortcut which implies the use of the kernel's pgd, instead
11506 * of a process's
11507@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11508 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11509 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11510
11511+#ifdef CONFIG_X86_32
11512+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11513+#else
11514+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11515+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11516+
11517+#ifdef CONFIG_PAX_MEMORY_UDEREF
11518+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11519+#else
11520+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11521+#endif
11522+
11523+#endif
11524+
11525 #ifndef __ASSEMBLY__
11526
11527 extern int direct_gbpages;
11528@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11529 * dst and src can be on the same page, but the range must not overlap,
11530 * and must not cross a page boundary.
11531 */
11532-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11533+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11534 {
11535- memcpy(dst, src, count * sizeof(pgd_t));
11536+ pax_open_kernel();
11537+ while (count--)
11538+ *dst++ = *src++;
11539+ pax_close_kernel();
11540 }
11541
11542+#ifdef CONFIG_PAX_PER_CPU_PGD
11543+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11544+#endif
11545+
11546+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11547+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11548+#else
11549+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11550+#endif
11551
11552 #include <asm-generic/pgtable.h>
11553 #endif /* __ASSEMBLY__ */
11554diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11555index 0c92113..34a77c6 100644
11556--- a/arch/x86/include/asm/pgtable_32.h
11557+++ b/arch/x86/include/asm/pgtable_32.h
11558@@ -25,9 +25,6 @@
11559 struct mm_struct;
11560 struct vm_area_struct;
11561
11562-extern pgd_t swapper_pg_dir[1024];
11563-extern pgd_t initial_page_table[1024];
11564-
11565 static inline void pgtable_cache_init(void) { }
11566 static inline void check_pgt_cache(void) { }
11567 void paging_init(void);
11568@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11569 # include <asm/pgtable-2level.h>
11570 #endif
11571
11572+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11573+extern pgd_t initial_page_table[PTRS_PER_PGD];
11574+#ifdef CONFIG_X86_PAE
11575+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11576+#endif
11577+
11578 #if defined(CONFIG_HIGHPTE)
11579 #define pte_offset_map(dir, address) \
11580 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11581@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11582 /* Clear a kernel PTE and flush it from the TLB */
11583 #define kpte_clear_flush(ptep, vaddr) \
11584 do { \
11585+ pax_open_kernel(); \
11586 pte_clear(&init_mm, (vaddr), (ptep)); \
11587+ pax_close_kernel(); \
11588 __flush_tlb_one((vaddr)); \
11589 } while (0)
11590
11591@@ -74,6 +79,9 @@ do { \
11592
11593 #endif /* !__ASSEMBLY__ */
11594
11595+#define HAVE_ARCH_UNMAPPED_AREA
11596+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11597+
11598 /*
11599 * kern_addr_valid() is (1) for FLATMEM and (0) for
11600 * SPARSEMEM and DISCONTIGMEM
11601diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11602index ed5903b..c7fe163 100644
11603--- a/arch/x86/include/asm/pgtable_32_types.h
11604+++ b/arch/x86/include/asm/pgtable_32_types.h
11605@@ -8,7 +8,7 @@
11606 */
11607 #ifdef CONFIG_X86_PAE
11608 # include <asm/pgtable-3level_types.h>
11609-# define PMD_SIZE (1UL << PMD_SHIFT)
11610+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11611 # define PMD_MASK (~(PMD_SIZE - 1))
11612 #else
11613 # include <asm/pgtable-2level_types.h>
11614@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11615 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11616 #endif
11617
11618+#ifdef CONFIG_PAX_KERNEXEC
11619+#ifndef __ASSEMBLY__
11620+extern unsigned char MODULES_EXEC_VADDR[];
11621+extern unsigned char MODULES_EXEC_END[];
11622+#endif
11623+#include <asm/boot.h>
11624+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11625+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11626+#else
11627+#define ktla_ktva(addr) (addr)
11628+#define ktva_ktla(addr) (addr)
11629+#endif
11630+
11631 #define MODULES_VADDR VMALLOC_START
11632 #define MODULES_END VMALLOC_END
11633 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11634diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11635index 975f709..9f779c9 100644
11636--- a/arch/x86/include/asm/pgtable_64.h
11637+++ b/arch/x86/include/asm/pgtable_64.h
11638@@ -16,10 +16,14 @@
11639
11640 extern pud_t level3_kernel_pgt[512];
11641 extern pud_t level3_ident_pgt[512];
11642+extern pud_t level3_vmalloc_start_pgt[512];
11643+extern pud_t level3_vmalloc_end_pgt[512];
11644+extern pud_t level3_vmemmap_pgt[512];
11645+extern pud_t level2_vmemmap_pgt[512];
11646 extern pmd_t level2_kernel_pgt[512];
11647 extern pmd_t level2_fixmap_pgt[512];
11648-extern pmd_t level2_ident_pgt[512];
11649-extern pgd_t init_level4_pgt[];
11650+extern pmd_t level2_ident_pgt[512*2];
11651+extern pgd_t init_level4_pgt[512];
11652
11653 #define swapper_pg_dir init_level4_pgt
11654
11655@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11656
11657 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11658 {
11659+ pax_open_kernel();
11660 *pmdp = pmd;
11661+ pax_close_kernel();
11662 }
11663
11664 static inline void native_pmd_clear(pmd_t *pmd)
11665@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11666
11667 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11668 {
11669+ pax_open_kernel();
11670 *pudp = pud;
11671+ pax_close_kernel();
11672 }
11673
11674 static inline void native_pud_clear(pud_t *pud)
11675@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11676
11677 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11678 {
11679+ pax_open_kernel();
11680+ *pgdp = pgd;
11681+ pax_close_kernel();
11682+}
11683+
11684+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11685+{
11686 *pgdp = pgd;
11687 }
11688
11689diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11690index 766ea16..5b96cb3 100644
11691--- a/arch/x86/include/asm/pgtable_64_types.h
11692+++ b/arch/x86/include/asm/pgtable_64_types.h
11693@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11694 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11695 #define MODULES_END _AC(0xffffffffff000000, UL)
11696 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11697+#define MODULES_EXEC_VADDR MODULES_VADDR
11698+#define MODULES_EXEC_END MODULES_END
11699+
11700+#define ktla_ktva(addr) (addr)
11701+#define ktva_ktla(addr) (addr)
11702
11703 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11704diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11705index 013286a..8b42f4f 100644
11706--- a/arch/x86/include/asm/pgtable_types.h
11707+++ b/arch/x86/include/asm/pgtable_types.h
11708@@ -16,13 +16,12 @@
11709 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11710 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11711 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11712-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11713+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11714 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11715 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11716 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11717-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11718-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11719-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11720+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11721+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11722 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11723
11724 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11725@@ -40,7 +39,6 @@
11726 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11727 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11728 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11729-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11730 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11731 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11732 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11733@@ -57,8 +55,10 @@
11734
11735 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11736 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11737-#else
11738+#elif defined(CONFIG_KMEMCHECK)
11739 #define _PAGE_NX (_AT(pteval_t, 0))
11740+#else
11741+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11742 #endif
11743
11744 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11745@@ -96,6 +96,9 @@
11746 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11747 _PAGE_ACCESSED)
11748
11749+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11750+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11751+
11752 #define __PAGE_KERNEL_EXEC \
11753 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11754 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11755@@ -106,7 +109,7 @@
11756 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11757 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11758 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11759-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11760+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11761 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11762 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11763 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11764@@ -168,8 +171,8 @@
11765 * bits are combined, this will alow user to access the high address mapped
11766 * VDSO in the presence of CONFIG_COMPAT_VDSO
11767 */
11768-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11769-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11770+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11771+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11772 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11773 #endif
11774
11775@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11776 {
11777 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11778 }
11779+#endif
11780
11781+#if PAGETABLE_LEVELS == 3
11782+#include <asm-generic/pgtable-nopud.h>
11783+#endif
11784+
11785+#if PAGETABLE_LEVELS == 2
11786+#include <asm-generic/pgtable-nopmd.h>
11787+#endif
11788+
11789+#ifndef __ASSEMBLY__
11790 #if PAGETABLE_LEVELS > 3
11791 typedef struct { pudval_t pud; } pud_t;
11792
11793@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11794 return pud.pud;
11795 }
11796 #else
11797-#include <asm-generic/pgtable-nopud.h>
11798-
11799 static inline pudval_t native_pud_val(pud_t pud)
11800 {
11801 return native_pgd_val(pud.pgd);
11802@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11803 return pmd.pmd;
11804 }
11805 #else
11806-#include <asm-generic/pgtable-nopmd.h>
11807-
11808 static inline pmdval_t native_pmd_val(pmd_t pmd)
11809 {
11810 return native_pgd_val(pmd.pud.pgd);
11811@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11812
11813 extern pteval_t __supported_pte_mask;
11814 extern void set_nx(void);
11815-extern int nx_enabled;
11816
11817 #define pgprot_writecombine pgprot_writecombine
11818 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11819diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11820index 4fa7dcc..764e33a 100644
11821--- a/arch/x86/include/asm/processor.h
11822+++ b/arch/x86/include/asm/processor.h
11823@@ -276,7 +276,7 @@ struct tss_struct {
11824
11825 } ____cacheline_aligned;
11826
11827-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11828+extern struct tss_struct init_tss[NR_CPUS];
11829
11830 /*
11831 * Save the original ist values for checking stack pointers during debugging
11832@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11833 */
11834 #define TASK_SIZE PAGE_OFFSET
11835 #define TASK_SIZE_MAX TASK_SIZE
11836+
11837+#ifdef CONFIG_PAX_SEGMEXEC
11838+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11839+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11840+#else
11841 #define STACK_TOP TASK_SIZE
11842-#define STACK_TOP_MAX STACK_TOP
11843+#endif
11844+
11845+#define STACK_TOP_MAX TASK_SIZE
11846
11847 #define INIT_THREAD { \
11848- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11849+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11850 .vm86_info = NULL, \
11851 .sysenter_cs = __KERNEL_CS, \
11852 .io_bitmap_ptr = NULL, \
11853@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11854 */
11855 #define INIT_TSS { \
11856 .x86_tss = { \
11857- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11858+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11859 .ss0 = __KERNEL_DS, \
11860 .ss1 = __KERNEL_CS, \
11861 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11862@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11863 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11864
11865 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11866-#define KSTK_TOP(info) \
11867-({ \
11868- unsigned long *__ptr = (unsigned long *)(info); \
11869- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11870-})
11871+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11872
11873 /*
11874 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11875@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11876 #define task_pt_regs(task) \
11877 ({ \
11878 struct pt_regs *__regs__; \
11879- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11880+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11881 __regs__ - 1; \
11882 })
11883
11884@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11885 /*
11886 * User space process size. 47bits minus one guard page.
11887 */
11888-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11889+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11890
11891 /* This decides where the kernel will search for a free chunk of vm
11892 * space during mmap's.
11893 */
11894 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11895- 0xc0000000 : 0xFFFFe000)
11896+ 0xc0000000 : 0xFFFFf000)
11897
11898 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11899 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11900@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11901 #define STACK_TOP_MAX TASK_SIZE_MAX
11902
11903 #define INIT_THREAD { \
11904- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11905+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11906 }
11907
11908 #define INIT_TSS { \
11909- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11910+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11911 }
11912
11913 /*
11914@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11915 */
11916 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11917
11918+#ifdef CONFIG_PAX_SEGMEXEC
11919+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11920+#endif
11921+
11922 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11923
11924 /* Get/set a process' ability to use the timestamp counter instruction */
11925@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11926
11927 void cpu_idle_wait(void);
11928
11929-extern unsigned long arch_align_stack(unsigned long sp);
11930+#define arch_align_stack(x) ((x) & ~0xfUL)
11931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11932
11933 void default_idle(void);
11934 bool set_pm_idle_to_default(void);
11935
11936-void stop_this_cpu(void *dummy);
11937+void stop_this_cpu(void *dummy) __noreturn;
11938
11939 #endif /* _ASM_X86_PROCESSOR_H */
11940diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11941index dcfde52..dbfea06 100644
11942--- a/arch/x86/include/asm/ptrace.h
11943+++ b/arch/x86/include/asm/ptrace.h
11944@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11945 }
11946
11947 /*
11948- * user_mode_vm(regs) determines whether a register set came from user mode.
11949+ * user_mode(regs) determines whether a register set came from user mode.
11950 * This is true if V8086 mode was enabled OR if the register set was from
11951 * protected mode with RPL-3 CS value. This tricky test checks that with
11952 * one comparison. Many places in the kernel can bypass this full check
11953- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11954+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11955+ * be used.
11956 */
11957-static inline int user_mode(struct pt_regs *regs)
11958+static inline int user_mode_novm(struct pt_regs *regs)
11959 {
11960 #ifdef CONFIG_X86_32
11961 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11962 #else
11963- return !!(regs->cs & 3);
11964+ return !!(regs->cs & SEGMENT_RPL_MASK);
11965 #endif
11966 }
11967
11968-static inline int user_mode_vm(struct pt_regs *regs)
11969+static inline int user_mode(struct pt_regs *regs)
11970 {
11971 #ifdef CONFIG_X86_32
11972 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11973 USER_RPL;
11974 #else
11975- return user_mode(regs);
11976+ return user_mode_novm(regs);
11977 #endif
11978 }
11979
11980@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11981 #ifdef CONFIG_X86_64
11982 static inline bool user_64bit_mode(struct pt_regs *regs)
11983 {
11984+ unsigned long cs = regs->cs & 0xffff;
11985 #ifndef CONFIG_PARAVIRT
11986 /*
11987 * On non-paravirt systems, this is the only long mode CPL 3
11988 * selector. We do not allow long mode selectors in the LDT.
11989 */
11990- return regs->cs == __USER_CS;
11991+ return cs == __USER_CS;
11992 #else
11993 /* Headers are too twisted for this to go in paravirt.h. */
11994- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11995+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11996 #endif
11997 }
11998 #endif
11999diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12000index 92f29706..a79cbbb 100644
12001--- a/arch/x86/include/asm/reboot.h
12002+++ b/arch/x86/include/asm/reboot.h
12003@@ -6,19 +6,19 @@
12004 struct pt_regs;
12005
12006 struct machine_ops {
12007- void (*restart)(char *cmd);
12008- void (*halt)(void);
12009- void (*power_off)(void);
12010+ void (* __noreturn restart)(char *cmd);
12011+ void (* __noreturn halt)(void);
12012+ void (* __noreturn power_off)(void);
12013 void (*shutdown)(void);
12014 void (*crash_shutdown)(struct pt_regs *);
12015- void (*emergency_restart)(void);
12016-};
12017+ void (* __noreturn emergency_restart)(void);
12018+} __no_const;
12019
12020 extern struct machine_ops machine_ops;
12021
12022 void native_machine_crash_shutdown(struct pt_regs *regs);
12023 void native_machine_shutdown(void);
12024-void machine_real_restart(unsigned int type);
12025+void machine_real_restart(unsigned int type) __noreturn;
12026 /* These must match dispatch_table in reboot_32.S */
12027 #define MRR_BIOS 0
12028 #define MRR_APM 1
12029diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12030index 2dbe4a7..ce1db00 100644
12031--- a/arch/x86/include/asm/rwsem.h
12032+++ b/arch/x86/include/asm/rwsem.h
12033@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12034 {
12035 asm volatile("# beginning down_read\n\t"
12036 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12037+
12038+#ifdef CONFIG_PAX_REFCOUNT
12039+ "jno 0f\n"
12040+ LOCK_PREFIX _ASM_DEC "(%1)\n"
12041+ "int $4\n0:\n"
12042+ _ASM_EXTABLE(0b, 0b)
12043+#endif
12044+
12045 /* adds 0x00000001 */
12046 " jns 1f\n"
12047 " call call_rwsem_down_read_failed\n"
12048@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12049 "1:\n\t"
12050 " mov %1,%2\n\t"
12051 " add %3,%2\n\t"
12052+
12053+#ifdef CONFIG_PAX_REFCOUNT
12054+ "jno 0f\n"
12055+ "sub %3,%2\n"
12056+ "int $4\n0:\n"
12057+ _ASM_EXTABLE(0b, 0b)
12058+#endif
12059+
12060 " jle 2f\n\t"
12061 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12062 " jnz 1b\n\t"
12063@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12064 long tmp;
12065 asm volatile("# beginning down_write\n\t"
12066 LOCK_PREFIX " xadd %1,(%2)\n\t"
12067+
12068+#ifdef CONFIG_PAX_REFCOUNT
12069+ "jno 0f\n"
12070+ "mov %1,(%2)\n"
12071+ "int $4\n0:\n"
12072+ _ASM_EXTABLE(0b, 0b)
12073+#endif
12074+
12075 /* adds 0xffff0001, returns the old value */
12076 " test %1,%1\n\t"
12077 /* was the count 0 before? */
12078@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12079 long tmp;
12080 asm volatile("# beginning __up_read\n\t"
12081 LOCK_PREFIX " xadd %1,(%2)\n\t"
12082+
12083+#ifdef CONFIG_PAX_REFCOUNT
12084+ "jno 0f\n"
12085+ "mov %1,(%2)\n"
12086+ "int $4\n0:\n"
12087+ _ASM_EXTABLE(0b, 0b)
12088+#endif
12089+
12090 /* subtracts 1, returns the old value */
12091 " jns 1f\n\t"
12092 " call call_rwsem_wake\n" /* expects old value in %edx */
12093@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12094 long tmp;
12095 asm volatile("# beginning __up_write\n\t"
12096 LOCK_PREFIX " xadd %1,(%2)\n\t"
12097+
12098+#ifdef CONFIG_PAX_REFCOUNT
12099+ "jno 0f\n"
12100+ "mov %1,(%2)\n"
12101+ "int $4\n0:\n"
12102+ _ASM_EXTABLE(0b, 0b)
12103+#endif
12104+
12105 /* subtracts 0xffff0001, returns the old value */
12106 " jns 1f\n\t"
12107 " call call_rwsem_wake\n" /* expects old value in %edx */
12108@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12109 {
12110 asm volatile("# beginning __downgrade_write\n\t"
12111 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12112+
12113+#ifdef CONFIG_PAX_REFCOUNT
12114+ "jno 0f\n"
12115+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12116+ "int $4\n0:\n"
12117+ _ASM_EXTABLE(0b, 0b)
12118+#endif
12119+
12120 /*
12121 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12122 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12123@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12124 */
12125 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12126 {
12127- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12128+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12129+
12130+#ifdef CONFIG_PAX_REFCOUNT
12131+ "jno 0f\n"
12132+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12133+ "int $4\n0:\n"
12134+ _ASM_EXTABLE(0b, 0b)
12135+#endif
12136+
12137 : "+m" (sem->count)
12138 : "er" (delta));
12139 }
12140@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12141 */
12142 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12143 {
12144- return delta + xadd(&sem->count, delta);
12145+ return delta + xadd_check_overflow(&sem->count, delta);
12146 }
12147
12148 #endif /* __KERNEL__ */
12149diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12150index 1654662..5af4157 100644
12151--- a/arch/x86/include/asm/segment.h
12152+++ b/arch/x86/include/asm/segment.h
12153@@ -64,10 +64,15 @@
12154 * 26 - ESPFIX small SS
12155 * 27 - per-cpu [ offset to per-cpu data area ]
12156 * 28 - stack_canary-20 [ for stack protector ]
12157- * 29 - unused
12158- * 30 - unused
12159+ * 29 - PCI BIOS CS
12160+ * 30 - PCI BIOS DS
12161 * 31 - TSS for double fault handler
12162 */
12163+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12164+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12165+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12166+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12167+
12168 #define GDT_ENTRY_TLS_MIN 6
12169 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12170
12171@@ -79,6 +84,8 @@
12172
12173 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12174
12175+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12176+
12177 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12178
12179 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12180@@ -104,6 +111,12 @@
12181 #define __KERNEL_STACK_CANARY 0
12182 #endif
12183
12184+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12185+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12186+
12187+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12188+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12189+
12190 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12191
12192 /*
12193@@ -141,7 +154,7 @@
12194 */
12195
12196 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12197-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12198+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12199
12200
12201 #else
12202@@ -165,6 +178,8 @@
12203 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12204 #define __USER32_DS __USER_DS
12205
12206+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12207+
12208 #define GDT_ENTRY_TSS 8 /* needs two entries */
12209 #define GDT_ENTRY_LDT 10 /* needs two entries */
12210 #define GDT_ENTRY_TLS_MIN 12
12211@@ -185,6 +200,7 @@
12212 #endif
12213
12214 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12215+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12216 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12217 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12218 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12219@@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12220 {
12221 unsigned long __limit;
12222 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12223- return __limit + 1;
12224+ return __limit;
12225 }
12226
12227 #endif /* !__ASSEMBLY__ */
12228diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12229index 0434c40..1714bf0 100644
12230--- a/arch/x86/include/asm/smp.h
12231+++ b/arch/x86/include/asm/smp.h
12232@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12233 /* cpus sharing the last level cache: */
12234 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12235 DECLARE_PER_CPU(u16, cpu_llc_id);
12236-DECLARE_PER_CPU(int, cpu_number);
12237+DECLARE_PER_CPU(unsigned int, cpu_number);
12238
12239 static inline struct cpumask *cpu_sibling_mask(int cpu)
12240 {
12241@@ -77,7 +77,7 @@ struct smp_ops {
12242
12243 void (*send_call_func_ipi)(const struct cpumask *mask);
12244 void (*send_call_func_single_ipi)(int cpu);
12245-};
12246+} __no_const;
12247
12248 /* Globals due to paravirt */
12249 extern void set_cpu_sibling_map(int cpu);
12250@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12251 extern int safe_smp_processor_id(void);
12252
12253 #elif defined(CONFIG_X86_64_SMP)
12254-#define raw_smp_processor_id() (percpu_read(cpu_number))
12255-
12256-#define stack_smp_processor_id() \
12257-({ \
12258- struct thread_info *ti; \
12259- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12260- ti->cpu; \
12261-})
12262+#define raw_smp_processor_id() (percpu_read(cpu_number))
12263+#define stack_smp_processor_id() raw_smp_processor_id()
12264 #define safe_smp_processor_id() smp_processor_id()
12265
12266 #endif
12267diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12268index 76bfa2c..12d3fe7 100644
12269--- a/arch/x86/include/asm/spinlock.h
12270+++ b/arch/x86/include/asm/spinlock.h
12271@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12272 static inline void arch_read_lock(arch_rwlock_t *rw)
12273 {
12274 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12275+
12276+#ifdef CONFIG_PAX_REFCOUNT
12277+ "jno 0f\n"
12278+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12279+ "int $4\n0:\n"
12280+ _ASM_EXTABLE(0b, 0b)
12281+#endif
12282+
12283 "jns 1f\n"
12284 "call __read_lock_failed\n\t"
12285 "1:\n"
12286@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12287 static inline void arch_write_lock(arch_rwlock_t *rw)
12288 {
12289 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12290+
12291+#ifdef CONFIG_PAX_REFCOUNT
12292+ "jno 0f\n"
12293+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12294+ "int $4\n0:\n"
12295+ _ASM_EXTABLE(0b, 0b)
12296+#endif
12297+
12298 "jz 1f\n"
12299 "call __write_lock_failed\n\t"
12300 "1:\n"
12301@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12302
12303 static inline void arch_read_unlock(arch_rwlock_t *rw)
12304 {
12305- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12306+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12307+
12308+#ifdef CONFIG_PAX_REFCOUNT
12309+ "jno 0f\n"
12310+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12311+ "int $4\n0:\n"
12312+ _ASM_EXTABLE(0b, 0b)
12313+#endif
12314+
12315 :"+m" (rw->lock) : : "memory");
12316 }
12317
12318 static inline void arch_write_unlock(arch_rwlock_t *rw)
12319 {
12320- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12321+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12322+
12323+#ifdef CONFIG_PAX_REFCOUNT
12324+ "jno 0f\n"
12325+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12326+ "int $4\n0:\n"
12327+ _ASM_EXTABLE(0b, 0b)
12328+#endif
12329+
12330 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12331 }
12332
12333diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12334index b5d9533..41655fa 100644
12335--- a/arch/x86/include/asm/stackprotector.h
12336+++ b/arch/x86/include/asm/stackprotector.h
12337@@ -47,7 +47,7 @@
12338 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12339 */
12340 #define GDT_STACK_CANARY_INIT \
12341- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12342+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12343
12344 /*
12345 * Initialize the stackprotector canary value.
12346@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12347
12348 static inline void load_stack_canary_segment(void)
12349 {
12350-#ifdef CONFIG_X86_32
12351+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12352 asm volatile ("mov %0, %%gs" : : "r" (0));
12353 #endif
12354 }
12355diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12356index 70bbe39..4ae2bd4 100644
12357--- a/arch/x86/include/asm/stacktrace.h
12358+++ b/arch/x86/include/asm/stacktrace.h
12359@@ -11,28 +11,20 @@
12360
12361 extern int kstack_depth_to_print;
12362
12363-struct thread_info;
12364+struct task_struct;
12365 struct stacktrace_ops;
12366
12367-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12368- unsigned long *stack,
12369- unsigned long bp,
12370- const struct stacktrace_ops *ops,
12371- void *data,
12372- unsigned long *end,
12373- int *graph);
12374+typedef unsigned long walk_stack_t(struct task_struct *task,
12375+ void *stack_start,
12376+ unsigned long *stack,
12377+ unsigned long bp,
12378+ const struct stacktrace_ops *ops,
12379+ void *data,
12380+ unsigned long *end,
12381+ int *graph);
12382
12383-extern unsigned long
12384-print_context_stack(struct thread_info *tinfo,
12385- unsigned long *stack, unsigned long bp,
12386- const struct stacktrace_ops *ops, void *data,
12387- unsigned long *end, int *graph);
12388-
12389-extern unsigned long
12390-print_context_stack_bp(struct thread_info *tinfo,
12391- unsigned long *stack, unsigned long bp,
12392- const struct stacktrace_ops *ops, void *data,
12393- unsigned long *end, int *graph);
12394+extern walk_stack_t print_context_stack;
12395+extern walk_stack_t print_context_stack_bp;
12396
12397 /* Generic stack tracer with callbacks */
12398
12399@@ -40,7 +32,7 @@ struct stacktrace_ops {
12400 void (*address)(void *data, unsigned long address, int reliable);
12401 /* On negative return stop dumping */
12402 int (*stack)(void *data, char *name);
12403- walk_stack_t walk_stack;
12404+ walk_stack_t *walk_stack;
12405 };
12406
12407 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12408diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12409index 4ec45b3..a4f0a8a 100644
12410--- a/arch/x86/include/asm/switch_to.h
12411+++ b/arch/x86/include/asm/switch_to.h
12412@@ -108,7 +108,7 @@ do { \
12413 "call __switch_to\n\t" \
12414 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12415 __switch_canary \
12416- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12417+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12418 "movq %%rax,%%rdi\n\t" \
12419 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12420 "jnz ret_from_fork\n\t" \
12421@@ -119,7 +119,7 @@ do { \
12422 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12423 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12424 [_tif_fork] "i" (_TIF_FORK), \
12425- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12426+ [thread_info] "m" (current_tinfo), \
12427 [current_task] "m" (current_task) \
12428 __switch_canary_iparam \
12429 : "memory", "cc" __EXTRA_CLOBBER)
12430diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12431index 3fda9db4..4ca1c61 100644
12432--- a/arch/x86/include/asm/sys_ia32.h
12433+++ b/arch/x86/include/asm/sys_ia32.h
12434@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12435 struct old_sigaction32 __user *);
12436 asmlinkage long sys32_alarm(unsigned int);
12437
12438-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12439+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12440 asmlinkage long sys32_sysfs(int, u32, u32);
12441
12442 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12443diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12444index ad6df8c..5e0cf6e 100644
12445--- a/arch/x86/include/asm/thread_info.h
12446+++ b/arch/x86/include/asm/thread_info.h
12447@@ -10,6 +10,7 @@
12448 #include <linux/compiler.h>
12449 #include <asm/page.h>
12450 #include <asm/types.h>
12451+#include <asm/percpu.h>
12452
12453 /*
12454 * low level task data that entry.S needs immediate access to
12455@@ -24,7 +25,6 @@ struct exec_domain;
12456 #include <linux/atomic.h>
12457
12458 struct thread_info {
12459- struct task_struct *task; /* main task structure */
12460 struct exec_domain *exec_domain; /* execution domain */
12461 __u32 flags; /* low level flags */
12462 __u32 status; /* thread synchronous flags */
12463@@ -34,19 +34,13 @@ struct thread_info {
12464 mm_segment_t addr_limit;
12465 struct restart_block restart_block;
12466 void __user *sysenter_return;
12467-#ifdef CONFIG_X86_32
12468- unsigned long previous_esp; /* ESP of the previous stack in
12469- case of nested (IRQ) stacks
12470- */
12471- __u8 supervisor_stack[0];
12472-#endif
12473+ unsigned long lowest_stack;
12474 unsigned int sig_on_uaccess_error:1;
12475 unsigned int uaccess_err:1; /* uaccess failed */
12476 };
12477
12478-#define INIT_THREAD_INFO(tsk) \
12479+#define INIT_THREAD_INFO \
12480 { \
12481- .task = &tsk, \
12482 .exec_domain = &default_exec_domain, \
12483 .flags = 0, \
12484 .cpu = 0, \
12485@@ -57,7 +51,7 @@ struct thread_info {
12486 }, \
12487 }
12488
12489-#define init_thread_info (init_thread_union.thread_info)
12490+#define init_thread_info (init_thread_union.stack)
12491 #define init_stack (init_thread_union.stack)
12492
12493 #else /* !__ASSEMBLY__ */
12494@@ -97,6 +91,7 @@ struct thread_info {
12495 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12496 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12497 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12498+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12499
12500 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12501 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12502@@ -120,16 +115,18 @@ struct thread_info {
12503 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12504 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12505 #define _TIF_X32 (1 << TIF_X32)
12506+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12507
12508 /* work to do in syscall_trace_enter() */
12509 #define _TIF_WORK_SYSCALL_ENTRY \
12510 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12511- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12512+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12513+ _TIF_GRSEC_SETXID)
12514
12515 /* work to do in syscall_trace_leave() */
12516 #define _TIF_WORK_SYSCALL_EXIT \
12517 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12518- _TIF_SYSCALL_TRACEPOINT)
12519+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12520
12521 /* work to do on interrupt/exception return */
12522 #define _TIF_WORK_MASK \
12523@@ -139,7 +136,8 @@ struct thread_info {
12524
12525 /* work to do on any return to user space */
12526 #define _TIF_ALLWORK_MASK \
12527- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12528+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12529+ _TIF_GRSEC_SETXID)
12530
12531 /* Only used for 64 bit */
12532 #define _TIF_DO_NOTIFY_MASK \
12533@@ -173,45 +171,40 @@ struct thread_info {
12534 ret; \
12535 })
12536
12537-#ifdef CONFIG_X86_32
12538-
12539-#define STACK_WARN (THREAD_SIZE/8)
12540-/*
12541- * macros/functions for gaining access to the thread information structure
12542- *
12543- * preempt_count needs to be 1 initially, until the scheduler is functional.
12544- */
12545-#ifndef __ASSEMBLY__
12546-
12547-
12548-/* how to get the current stack pointer from C */
12549-register unsigned long current_stack_pointer asm("esp") __used;
12550-
12551-/* how to get the thread information struct from C */
12552-static inline struct thread_info *current_thread_info(void)
12553-{
12554- return (struct thread_info *)
12555- (current_stack_pointer & ~(THREAD_SIZE - 1));
12556-}
12557-
12558-#else /* !__ASSEMBLY__ */
12559-
12560+#ifdef __ASSEMBLY__
12561 /* how to get the thread information struct from ASM */
12562 #define GET_THREAD_INFO(reg) \
12563- movl $-THREAD_SIZE, reg; \
12564- andl %esp, reg
12565+ mov PER_CPU_VAR(current_tinfo), reg
12566
12567 /* use this one if reg already contains %esp */
12568-#define GET_THREAD_INFO_WITH_ESP(reg) \
12569- andl $-THREAD_SIZE, reg
12570+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12571+#else
12572+/* how to get the thread information struct from C */
12573+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12574+
12575+static __always_inline struct thread_info *current_thread_info(void)
12576+{
12577+ return percpu_read_stable(current_tinfo);
12578+}
12579+#endif
12580+
12581+#ifdef CONFIG_X86_32
12582+
12583+#define STACK_WARN (THREAD_SIZE/8)
12584+/*
12585+ * macros/functions for gaining access to the thread information structure
12586+ *
12587+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12588+ */
12589+#ifndef __ASSEMBLY__
12590+
12591+/* how to get the current stack pointer from C */
12592+register unsigned long current_stack_pointer asm("esp") __used;
12593
12594 #endif
12595
12596 #else /* X86_32 */
12597
12598-#include <asm/percpu.h>
12599-#define KERNEL_STACK_OFFSET (5*8)
12600-
12601 /*
12602 * macros/functions for gaining access to the thread information structure
12603 * preempt_count needs to be 1 initially, until the scheduler is functional.
12604@@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12605 #ifndef __ASSEMBLY__
12606 DECLARE_PER_CPU(unsigned long, kernel_stack);
12607
12608-static inline struct thread_info *current_thread_info(void)
12609-{
12610- struct thread_info *ti;
12611- ti = (void *)(percpu_read_stable(kernel_stack) +
12612- KERNEL_STACK_OFFSET - THREAD_SIZE);
12613- return ti;
12614-}
12615-
12616-#else /* !__ASSEMBLY__ */
12617-
12618-/* how to get the thread information struct from ASM */
12619-#define GET_THREAD_INFO(reg) \
12620- movq PER_CPU_VAR(kernel_stack),reg ; \
12621- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12622-
12623-/*
12624- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12625- * a certain register (to be used in assembler memory operands).
12626- */
12627-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12628-
12629+/* how to get the current stack pointer from C */
12630+register unsigned long current_stack_pointer asm("rsp") __used;
12631 #endif
12632
12633 #endif /* !X86_32 */
12634@@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12635 extern void free_thread_info(struct thread_info *ti);
12636 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12637 #define arch_task_cache_init arch_task_cache_init
12638+
12639+#define __HAVE_THREAD_FUNCTIONS
12640+#define task_thread_info(task) (&(task)->tinfo)
12641+#define task_stack_page(task) ((task)->stack)
12642+#define setup_thread_stack(p, org) do {} while (0)
12643+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12644+
12645+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12646+extern struct task_struct *alloc_task_struct_node(int node);
12647+extern void free_task_struct(struct task_struct *);
12648+
12649 #endif
12650 #endif /* _ASM_X86_THREAD_INFO_H */
12651diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12652index e054459..14bc8a7 100644
12653--- a/arch/x86/include/asm/uaccess.h
12654+++ b/arch/x86/include/asm/uaccess.h
12655@@ -7,12 +7,15 @@
12656 #include <linux/compiler.h>
12657 #include <linux/thread_info.h>
12658 #include <linux/string.h>
12659+#include <linux/sched.h>
12660 #include <asm/asm.h>
12661 #include <asm/page.h>
12662
12663 #define VERIFY_READ 0
12664 #define VERIFY_WRITE 1
12665
12666+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12667+
12668 /*
12669 * The fs value determines whether argument validity checking should be
12670 * performed or not. If get_fs() == USER_DS, checking is performed, with
12671@@ -28,7 +31,12 @@
12672
12673 #define get_ds() (KERNEL_DS)
12674 #define get_fs() (current_thread_info()->addr_limit)
12675+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12676+void __set_fs(mm_segment_t x);
12677+void set_fs(mm_segment_t x);
12678+#else
12679 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12680+#endif
12681
12682 #define segment_eq(a, b) ((a).seg == (b).seg)
12683
12684@@ -76,7 +84,33 @@
12685 * checks that the pointer is in the user space range - after calling
12686 * this function, memory access functions may still return -EFAULT.
12687 */
12688-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12689+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12690+#define access_ok(type, addr, size) \
12691+({ \
12692+ long __size = size; \
12693+ unsigned long __addr = (unsigned long)addr; \
12694+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12695+ unsigned long __end_ao = __addr + __size - 1; \
12696+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12697+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12698+ while(__addr_ao <= __end_ao) { \
12699+ char __c_ao; \
12700+ __addr_ao += PAGE_SIZE; \
12701+ if (__size > PAGE_SIZE) \
12702+ cond_resched(); \
12703+ if (__get_user(__c_ao, (char __user *)__addr)) \
12704+ break; \
12705+ if (type != VERIFY_WRITE) { \
12706+ __addr = __addr_ao; \
12707+ continue; \
12708+ } \
12709+ if (__put_user(__c_ao, (char __user *)__addr)) \
12710+ break; \
12711+ __addr = __addr_ao; \
12712+ } \
12713+ } \
12714+ __ret_ao; \
12715+})
12716
12717 /*
12718 * The exception table consists of pairs of addresses: the first is the
12719@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12720 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12721 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12722
12723-
12724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12725+#define __copyuser_seg "gs;"
12726+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12727+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12728+#else
12729+#define __copyuser_seg
12730+#define __COPYUSER_SET_ES
12731+#define __COPYUSER_RESTORE_ES
12732+#endif
12733
12734 #ifdef CONFIG_X86_32
12735 #define __put_user_asm_u64(x, addr, err, errret) \
12736- asm volatile("1: movl %%eax,0(%2)\n" \
12737- "2: movl %%edx,4(%2)\n" \
12738+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12739+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12740 "3:\n" \
12741 ".section .fixup,\"ax\"\n" \
12742 "4: movl %3,%0\n" \
12743@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12744 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12745
12746 #define __put_user_asm_ex_u64(x, addr) \
12747- asm volatile("1: movl %%eax,0(%1)\n" \
12748- "2: movl %%edx,4(%1)\n" \
12749+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12750+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12751 "3:\n" \
12752 _ASM_EXTABLE(1b, 2b - 1b) \
12753 _ASM_EXTABLE(2b, 3b - 2b) \
12754@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12755 __typeof__(*(ptr)) __pu_val; \
12756 __chk_user_ptr(ptr); \
12757 might_fault(); \
12758- __pu_val = x; \
12759+ __pu_val = (x); \
12760 switch (sizeof(*(ptr))) { \
12761 case 1: \
12762 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12763@@ -373,7 +415,7 @@ do { \
12764 } while (0)
12765
12766 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12767- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12768+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12769 "2:\n" \
12770 ".section .fixup,\"ax\"\n" \
12771 "3: mov %3,%0\n" \
12772@@ -381,7 +423,7 @@ do { \
12773 " jmp 2b\n" \
12774 ".previous\n" \
12775 _ASM_EXTABLE(1b, 3b) \
12776- : "=r" (err), ltype(x) \
12777+ : "=r" (err), ltype (x) \
12778 : "m" (__m(addr)), "i" (errret), "0" (err))
12779
12780 #define __get_user_size_ex(x, ptr, size) \
12781@@ -406,7 +448,7 @@ do { \
12782 } while (0)
12783
12784 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12785- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12786+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12787 "2:\n" \
12788 _ASM_EXTABLE(1b, 2b - 1b) \
12789 : ltype(x) : "m" (__m(addr)))
12790@@ -423,13 +465,24 @@ do { \
12791 int __gu_err; \
12792 unsigned long __gu_val; \
12793 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12794- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12795+ (x) = (__typeof__(*(ptr)))__gu_val; \
12796 __gu_err; \
12797 })
12798
12799 /* FIXME: this hack is definitely wrong -AK */
12800 struct __large_struct { unsigned long buf[100]; };
12801-#define __m(x) (*(struct __large_struct __user *)(x))
12802+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12803+#define ____m(x) \
12804+({ \
12805+ unsigned long ____x = (unsigned long)(x); \
12806+ if (____x < PAX_USER_SHADOW_BASE) \
12807+ ____x += PAX_USER_SHADOW_BASE; \
12808+ (void __user *)____x; \
12809+})
12810+#else
12811+#define ____m(x) (x)
12812+#endif
12813+#define __m(x) (*(struct __large_struct __user *)____m(x))
12814
12815 /*
12816 * Tell gcc we read from memory instead of writing: this is because
12817@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12818 * aliasing issues.
12819 */
12820 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12821- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12822+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12823 "2:\n" \
12824 ".section .fixup,\"ax\"\n" \
12825 "3: mov %3,%0\n" \
12826@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12827 ".previous\n" \
12828 _ASM_EXTABLE(1b, 3b) \
12829 : "=r"(err) \
12830- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12831+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12832
12833 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12834- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12835+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12836 "2:\n" \
12837 _ASM_EXTABLE(1b, 2b - 1b) \
12838 : : ltype(x), "m" (__m(addr)))
12839@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12840 * On error, the variable @x is set to zero.
12841 */
12842
12843+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12844+#define __get_user(x, ptr) get_user((x), (ptr))
12845+#else
12846 #define __get_user(x, ptr) \
12847 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12848+#endif
12849
12850 /**
12851 * __put_user: - Write a simple value into user space, with less checking.
12852@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12853 * Returns zero on success, or -EFAULT on error.
12854 */
12855
12856+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12857+#define __put_user(x, ptr) put_user((x), (ptr))
12858+#else
12859 #define __put_user(x, ptr) \
12860 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12861+#endif
12862
12863 #define __get_user_unaligned __get_user
12864 #define __put_user_unaligned __put_user
12865@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12866 #define get_user_ex(x, ptr) do { \
12867 unsigned long __gue_val; \
12868 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12869- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12870+ (x) = (__typeof__(*(ptr)))__gue_val; \
12871 } while (0)
12872
12873 #ifdef CONFIG_X86_WP_WORKS_OK
12874diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12875index 8084bc7..3d6ec37 100644
12876--- a/arch/x86/include/asm/uaccess_32.h
12877+++ b/arch/x86/include/asm/uaccess_32.h
12878@@ -11,15 +11,15 @@
12879 #include <asm/page.h>
12880
12881 unsigned long __must_check __copy_to_user_ll
12882- (void __user *to, const void *from, unsigned long n);
12883+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12884 unsigned long __must_check __copy_from_user_ll
12885- (void *to, const void __user *from, unsigned long n);
12886+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12887 unsigned long __must_check __copy_from_user_ll_nozero
12888- (void *to, const void __user *from, unsigned long n);
12889+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll_nocache
12891- (void *to, const void __user *from, unsigned long n);
12892+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12894- (void *to, const void __user *from, unsigned long n);
12895+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896
12897 /**
12898 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12899@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900 static __always_inline unsigned long __must_check
12901 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12902 {
12903+ if ((long)n < 0)
12904+ return n;
12905+
12906 if (__builtin_constant_p(n)) {
12907 unsigned long ret;
12908
12909@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12910 return ret;
12911 }
12912 }
12913+ if (!__builtin_constant_p(n))
12914+ check_object_size(from, n, true);
12915 return __copy_to_user_ll(to, from, n);
12916 }
12917
12918@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12919 __copy_to_user(void __user *to, const void *from, unsigned long n)
12920 {
12921 might_fault();
12922+
12923 return __copy_to_user_inatomic(to, from, n);
12924 }
12925
12926 static __always_inline unsigned long
12927 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12928 {
12929+ if ((long)n < 0)
12930+ return n;
12931+
12932 /* Avoid zeroing the tail if the copy fails..
12933 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12934 * but as the zeroing behaviour is only significant when n is not
12935@@ -137,6 +146,10 @@ static __always_inline unsigned long
12936 __copy_from_user(void *to, const void __user *from, unsigned long n)
12937 {
12938 might_fault();
12939+
12940+ if ((long)n < 0)
12941+ return n;
12942+
12943 if (__builtin_constant_p(n)) {
12944 unsigned long ret;
12945
12946@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12947 return ret;
12948 }
12949 }
12950+ if (!__builtin_constant_p(n))
12951+ check_object_size(to, n, false);
12952 return __copy_from_user_ll(to, from, n);
12953 }
12954
12955@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12956 const void __user *from, unsigned long n)
12957 {
12958 might_fault();
12959+
12960+ if ((long)n < 0)
12961+ return n;
12962+
12963 if (__builtin_constant_p(n)) {
12964 unsigned long ret;
12965
12966@@ -181,15 +200,19 @@ static __always_inline unsigned long
12967 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12968 unsigned long n)
12969 {
12970- return __copy_from_user_ll_nocache_nozero(to, from, n);
12971+ if ((long)n < 0)
12972+ return n;
12973+
12974+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12975 }
12976
12977-unsigned long __must_check copy_to_user(void __user *to,
12978- const void *from, unsigned long n);
12979-unsigned long __must_check _copy_from_user(void *to,
12980- const void __user *from,
12981- unsigned long n);
12982-
12983+extern void copy_to_user_overflow(void)
12984+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12985+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12986+#else
12987+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12988+#endif
12989+;
12990
12991 extern void copy_from_user_overflow(void)
12992 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12993@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12994 #endif
12995 ;
12996
12997-static inline unsigned long __must_check copy_from_user(void *to,
12998- const void __user *from,
12999- unsigned long n)
13000+/**
13001+ * copy_to_user: - Copy a block of data into user space.
13002+ * @to: Destination address, in user space.
13003+ * @from: Source address, in kernel space.
13004+ * @n: Number of bytes to copy.
13005+ *
13006+ * Context: User context only. This function may sleep.
13007+ *
13008+ * Copy data from kernel space to user space.
13009+ *
13010+ * Returns number of bytes that could not be copied.
13011+ * On success, this will be zero.
13012+ */
13013+static inline unsigned long __must_check
13014+copy_to_user(void __user *to, const void *from, unsigned long n)
13015 {
13016- int sz = __compiletime_object_size(to);
13017+ size_t sz = __compiletime_object_size(from);
13018
13019- if (likely(sz == -1 || sz >= n))
13020- n = _copy_from_user(to, from, n);
13021- else
13022+ if (unlikely(sz != (size_t)-1 && sz < n))
13023+ copy_to_user_overflow();
13024+ else if (access_ok(VERIFY_WRITE, to, n))
13025+ n = __copy_to_user(to, from, n);
13026+ return n;
13027+}
13028+
13029+/**
13030+ * copy_from_user: - Copy a block of data from user space.
13031+ * @to: Destination address, in kernel space.
13032+ * @from: Source address, in user space.
13033+ * @n: Number of bytes to copy.
13034+ *
13035+ * Context: User context only. This function may sleep.
13036+ *
13037+ * Copy data from user space to kernel space.
13038+ *
13039+ * Returns number of bytes that could not be copied.
13040+ * On success, this will be zero.
13041+ *
13042+ * If some data could not be copied, this function will pad the copied
13043+ * data to the requested size using zero bytes.
13044+ */
13045+static inline unsigned long __must_check
13046+copy_from_user(void *to, const void __user *from, unsigned long n)
13047+{
13048+ size_t sz = __compiletime_object_size(to);
13049+
13050+ if (unlikely(sz != (size_t)-1 && sz < n))
13051 copy_from_user_overflow();
13052-
13053+ else if (access_ok(VERIFY_READ, from, n))
13054+ n = __copy_from_user(to, from, n);
13055+ else if ((long)n > 0) {
13056+ if (!__builtin_constant_p(n))
13057+ check_object_size(to, n, false);
13058+ memset(to, 0, n);
13059+ }
13060 return n;
13061 }
13062
13063@@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13064 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13065
13066 long strnlen_user(const char __user *str, long n);
13067-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13068-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13069+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13070+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13071
13072 #endif /* _ASM_X86_UACCESS_32_H */
13073diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13074index fcd4b6f..ef04f8f 100644
13075--- a/arch/x86/include/asm/uaccess_64.h
13076+++ b/arch/x86/include/asm/uaccess_64.h
13077@@ -10,6 +10,9 @@
13078 #include <asm/alternative.h>
13079 #include <asm/cpufeature.h>
13080 #include <asm/page.h>
13081+#include <asm/pgtable.h>
13082+
13083+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13084
13085 /*
13086 * Copy To/From Userspace
13087@@ -17,12 +20,14 @@
13088
13089 /* Handles exceptions in both to and from, but doesn't do access_ok */
13090 __must_check unsigned long
13091-copy_user_generic_string(void *to, const void *from, unsigned len);
13092+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13093 __must_check unsigned long
13094-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13095+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13096
13097 static __always_inline __must_check unsigned long
13098-copy_user_generic(void *to, const void *from, unsigned len)
13099+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13100+static __always_inline __must_check unsigned long
13101+copy_user_generic(void *to, const void *from, unsigned long len)
13102 {
13103 unsigned ret;
13104
13105@@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13106 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13107 "=d" (len)),
13108 "1" (to), "2" (from), "3" (len)
13109- : "memory", "rcx", "r8", "r9", "r10", "r11");
13110+ : "memory", "rcx", "r8", "r9", "r11");
13111 return ret;
13112 }
13113
13114+static __always_inline __must_check unsigned long
13115+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13116+static __always_inline __must_check unsigned long
13117+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13118 __must_check unsigned long
13119-_copy_to_user(void __user *to, const void *from, unsigned len);
13120-__must_check unsigned long
13121-_copy_from_user(void *to, const void __user *from, unsigned len);
13122-__must_check unsigned long
13123-copy_in_user(void __user *to, const void __user *from, unsigned len);
13124+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13125+
13126+extern void copy_to_user_overflow(void)
13127+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13128+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13129+#else
13130+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13131+#endif
13132+;
13133+
13134+extern void copy_from_user_overflow(void)
13135+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13136+ __compiletime_error("copy_from_user() buffer size is not provably correct")
13137+#else
13138+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
13139+#endif
13140+;
13141
13142 static inline unsigned long __must_check copy_from_user(void *to,
13143 const void __user *from,
13144 unsigned long n)
13145 {
13146- int sz = __compiletime_object_size(to);
13147-
13148 might_fault();
13149- if (likely(sz == -1 || sz >= n))
13150- n = _copy_from_user(to, from, n);
13151-#ifdef CONFIG_DEBUG_VM
13152- else
13153- WARN(1, "Buffer overflow detected!\n");
13154-#endif
13155+
13156+ if (access_ok(VERIFY_READ, from, n))
13157+ n = __copy_from_user(to, from, n);
13158+ else if (n < INT_MAX) {
13159+ if (!__builtin_constant_p(n))
13160+ check_object_size(to, n, false);
13161+ memset(to, 0, n);
13162+ }
13163 return n;
13164 }
13165
13166 static __always_inline __must_check
13167-int copy_to_user(void __user *dst, const void *src, unsigned size)
13168+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13169 {
13170 might_fault();
13171
13172- return _copy_to_user(dst, src, size);
13173+ if (access_ok(VERIFY_WRITE, dst, size))
13174+ size = __copy_to_user(dst, src, size);
13175+ return size;
13176 }
13177
13178 static __always_inline __must_check
13179-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13180+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13181 {
13182- int ret = 0;
13183+ size_t sz = __compiletime_object_size(dst);
13184+ unsigned ret = 0;
13185
13186 might_fault();
13187- if (!__builtin_constant_p(size))
13188- return copy_user_generic(dst, (__force void *)src, size);
13189+
13190+ if (size > INT_MAX)
13191+ return size;
13192+
13193+#ifdef CONFIG_PAX_MEMORY_UDEREF
13194+ if (!__access_ok(VERIFY_READ, src, size))
13195+ return size;
13196+#endif
13197+
13198+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13199+ copy_from_user_overflow();
13200+ return size;
13201+ }
13202+
13203+ if (!__builtin_constant_p(size)) {
13204+ check_object_size(dst, size, false);
13205+
13206+#ifdef CONFIG_PAX_MEMORY_UDEREF
13207+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13208+ src += PAX_USER_SHADOW_BASE;
13209+#endif
13210+
13211+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13212+ }
13213 switch (size) {
13214- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13215+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13216 ret, "b", "b", "=q", 1);
13217 return ret;
13218- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13219+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13220 ret, "w", "w", "=r", 2);
13221 return ret;
13222- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13223+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13224 ret, "l", "k", "=r", 4);
13225 return ret;
13226- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13227+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13228 ret, "q", "", "=r", 8);
13229 return ret;
13230 case 10:
13231- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13232+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13233 ret, "q", "", "=r", 10);
13234 if (unlikely(ret))
13235 return ret;
13236 __get_user_asm(*(u16 *)(8 + (char *)dst),
13237- (u16 __user *)(8 + (char __user *)src),
13238+ (const u16 __user *)(8 + (const char __user *)src),
13239 ret, "w", "w", "=r", 2);
13240 return ret;
13241 case 16:
13242- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13243+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13244 ret, "q", "", "=r", 16);
13245 if (unlikely(ret))
13246 return ret;
13247 __get_user_asm(*(u64 *)(8 + (char *)dst),
13248- (u64 __user *)(8 + (char __user *)src),
13249+ (const u64 __user *)(8 + (const char __user *)src),
13250 ret, "q", "", "=r", 8);
13251 return ret;
13252 default:
13253- return copy_user_generic(dst, (__force void *)src, size);
13254+
13255+#ifdef CONFIG_PAX_MEMORY_UDEREF
13256+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13257+ src += PAX_USER_SHADOW_BASE;
13258+#endif
13259+
13260+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13261 }
13262 }
13263
13264 static __always_inline __must_check
13265-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13266+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13267 {
13268- int ret = 0;
13269+ size_t sz = __compiletime_object_size(src);
13270+ unsigned ret = 0;
13271
13272 might_fault();
13273- if (!__builtin_constant_p(size))
13274- return copy_user_generic((__force void *)dst, src, size);
13275+
13276+ if (size > INT_MAX)
13277+ return size;
13278+
13279+#ifdef CONFIG_PAX_MEMORY_UDEREF
13280+ if (!__access_ok(VERIFY_WRITE, dst, size))
13281+ return size;
13282+#endif
13283+
13284+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13285+ copy_to_user_overflow();
13286+ return size;
13287+ }
13288+
13289+ if (!__builtin_constant_p(size)) {
13290+ check_object_size(src, size, true);
13291+
13292+#ifdef CONFIG_PAX_MEMORY_UDEREF
13293+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13294+ dst += PAX_USER_SHADOW_BASE;
13295+#endif
13296+
13297+ return copy_user_generic((__force_kernel void *)dst, src, size);
13298+ }
13299 switch (size) {
13300- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13301+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13302 ret, "b", "b", "iq", 1);
13303 return ret;
13304- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13305+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13306 ret, "w", "w", "ir", 2);
13307 return ret;
13308- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13309+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13310 ret, "l", "k", "ir", 4);
13311 return ret;
13312- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13313+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13314 ret, "q", "", "er", 8);
13315 return ret;
13316 case 10:
13317- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13318+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13319 ret, "q", "", "er", 10);
13320 if (unlikely(ret))
13321 return ret;
13322 asm("":::"memory");
13323- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13324+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13325 ret, "w", "w", "ir", 2);
13326 return ret;
13327 case 16:
13328- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13329+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13330 ret, "q", "", "er", 16);
13331 if (unlikely(ret))
13332 return ret;
13333 asm("":::"memory");
13334- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13335+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13336 ret, "q", "", "er", 8);
13337 return ret;
13338 default:
13339- return copy_user_generic((__force void *)dst, src, size);
13340+
13341+#ifdef CONFIG_PAX_MEMORY_UDEREF
13342+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13343+ dst += PAX_USER_SHADOW_BASE;
13344+#endif
13345+
13346+ return copy_user_generic((__force_kernel void *)dst, src, size);
13347 }
13348 }
13349
13350 static __always_inline __must_check
13351-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13352+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13353 {
13354- int ret = 0;
13355+ unsigned ret = 0;
13356
13357 might_fault();
13358- if (!__builtin_constant_p(size))
13359- return copy_user_generic((__force void *)dst,
13360- (__force void *)src, size);
13361+
13362+ if (size > INT_MAX)
13363+ return size;
13364+
13365+#ifdef CONFIG_PAX_MEMORY_UDEREF
13366+ if (!__access_ok(VERIFY_READ, src, size))
13367+ return size;
13368+ if (!__access_ok(VERIFY_WRITE, dst, size))
13369+ return size;
13370+#endif
13371+
13372+ if (!__builtin_constant_p(size)) {
13373+
13374+#ifdef CONFIG_PAX_MEMORY_UDEREF
13375+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13376+ src += PAX_USER_SHADOW_BASE;
13377+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13378+ dst += PAX_USER_SHADOW_BASE;
13379+#endif
13380+
13381+ return copy_user_generic((__force_kernel void *)dst,
13382+ (__force_kernel const void *)src, size);
13383+ }
13384 switch (size) {
13385 case 1: {
13386 u8 tmp;
13387- __get_user_asm(tmp, (u8 __user *)src,
13388+ __get_user_asm(tmp, (const u8 __user *)src,
13389 ret, "b", "b", "=q", 1);
13390 if (likely(!ret))
13391 __put_user_asm(tmp, (u8 __user *)dst,
13392@@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13393 }
13394 case 2: {
13395 u16 tmp;
13396- __get_user_asm(tmp, (u16 __user *)src,
13397+ __get_user_asm(tmp, (const u16 __user *)src,
13398 ret, "w", "w", "=r", 2);
13399 if (likely(!ret))
13400 __put_user_asm(tmp, (u16 __user *)dst,
13401@@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13402
13403 case 4: {
13404 u32 tmp;
13405- __get_user_asm(tmp, (u32 __user *)src,
13406+ __get_user_asm(tmp, (const u32 __user *)src,
13407 ret, "l", "k", "=r", 4);
13408 if (likely(!ret))
13409 __put_user_asm(tmp, (u32 __user *)dst,
13410@@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13411 }
13412 case 8: {
13413 u64 tmp;
13414- __get_user_asm(tmp, (u64 __user *)src,
13415+ __get_user_asm(tmp, (const u64 __user *)src,
13416 ret, "q", "", "=r", 8);
13417 if (likely(!ret))
13418 __put_user_asm(tmp, (u64 __user *)dst,
13419@@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13420 return ret;
13421 }
13422 default:
13423- return copy_user_generic((__force void *)dst,
13424- (__force void *)src, size);
13425+
13426+#ifdef CONFIG_PAX_MEMORY_UDEREF
13427+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13428+ src += PAX_USER_SHADOW_BASE;
13429+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13430+ dst += PAX_USER_SHADOW_BASE;
13431+#endif
13432+
13433+ return copy_user_generic((__force_kernel void *)dst,
13434+ (__force_kernel const void *)src, size);
13435 }
13436 }
13437
13438 __must_check long strnlen_user(const char __user *str, long n);
13439 __must_check long __strnlen_user(const char __user *str, long n);
13440 __must_check long strlen_user(const char __user *str);
13441-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13442-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13443+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13444+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13445
13446 static __must_check __always_inline int
13447-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13448+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13449 {
13450- return copy_user_generic(dst, (__force const void *)src, size);
13451+ if (size > INT_MAX)
13452+ return size;
13453+
13454+#ifdef CONFIG_PAX_MEMORY_UDEREF
13455+ if (!__access_ok(VERIFY_READ, src, size))
13456+ return size;
13457+
13458+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13459+ src += PAX_USER_SHADOW_BASE;
13460+#endif
13461+
13462+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13463 }
13464
13465-static __must_check __always_inline int
13466-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13467+static __must_check __always_inline unsigned long
13468+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13469 {
13470- return copy_user_generic((__force void *)dst, src, size);
13471+ if (size > INT_MAX)
13472+ return size;
13473+
13474+#ifdef CONFIG_PAX_MEMORY_UDEREF
13475+ if (!__access_ok(VERIFY_WRITE, dst, size))
13476+ return size;
13477+
13478+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13479+ dst += PAX_USER_SHADOW_BASE;
13480+#endif
13481+
13482+ return copy_user_generic((__force_kernel void *)dst, src, size);
13483 }
13484
13485-extern long __copy_user_nocache(void *dst, const void __user *src,
13486- unsigned size, int zerorest);
13487+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13488+ unsigned long size, int zerorest) __size_overflow(3);
13489
13490-static inline int
13491-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13492+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13493 {
13494 might_sleep();
13495+
13496+ if (size > INT_MAX)
13497+ return size;
13498+
13499+#ifdef CONFIG_PAX_MEMORY_UDEREF
13500+ if (!__access_ok(VERIFY_READ, src, size))
13501+ return size;
13502+#endif
13503+
13504 return __copy_user_nocache(dst, src, size, 1);
13505 }
13506
13507-static inline int
13508-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509- unsigned size)
13510+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13511+ unsigned long size)
13512 {
13513+ if (size > INT_MAX)
13514+ return size;
13515+
13516+#ifdef CONFIG_PAX_MEMORY_UDEREF
13517+ if (!__access_ok(VERIFY_READ, src, size))
13518+ return size;
13519+#endif
13520+
13521 return __copy_user_nocache(dst, src, size, 0);
13522 }
13523
13524-unsigned long
13525-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13526+extern unsigned long
13527+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13528
13529 #endif /* _ASM_X86_UACCESS_64_H */
13530diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13531index bb05228..d763d5b 100644
13532--- a/arch/x86/include/asm/vdso.h
13533+++ b/arch/x86/include/asm/vdso.h
13534@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13535 #define VDSO32_SYMBOL(base, name) \
13536 ({ \
13537 extern const char VDSO32_##name[]; \
13538- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13539+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13540 })
13541 #endif
13542
13543diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13544index 764b66a..ad3cfc8 100644
13545--- a/arch/x86/include/asm/x86_init.h
13546+++ b/arch/x86/include/asm/x86_init.h
13547@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13548 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13549 void (*find_smp_config)(void);
13550 void (*get_smp_config)(unsigned int early);
13551-};
13552+} __no_const;
13553
13554 /**
13555 * struct x86_init_resources - platform specific resource related ops
13556@@ -43,7 +43,7 @@ struct x86_init_resources {
13557 void (*probe_roms)(void);
13558 void (*reserve_resources)(void);
13559 char *(*memory_setup)(void);
13560-};
13561+} __no_const;
13562
13563 /**
13564 * struct x86_init_irqs - platform specific interrupt setup
13565@@ -56,7 +56,7 @@ struct x86_init_irqs {
13566 void (*pre_vector_init)(void);
13567 void (*intr_init)(void);
13568 void (*trap_init)(void);
13569-};
13570+} __no_const;
13571
13572 /**
13573 * struct x86_init_oem - oem platform specific customizing functions
13574@@ -66,7 +66,7 @@ struct x86_init_irqs {
13575 struct x86_init_oem {
13576 void (*arch_setup)(void);
13577 void (*banner)(void);
13578-};
13579+} __no_const;
13580
13581 /**
13582 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13583@@ -77,7 +77,7 @@ struct x86_init_oem {
13584 */
13585 struct x86_init_mapping {
13586 void (*pagetable_reserve)(u64 start, u64 end);
13587-};
13588+} __no_const;
13589
13590 /**
13591 * struct x86_init_paging - platform specific paging functions
13592@@ -87,7 +87,7 @@ struct x86_init_mapping {
13593 struct x86_init_paging {
13594 void (*pagetable_setup_start)(pgd_t *base);
13595 void (*pagetable_setup_done)(pgd_t *base);
13596-};
13597+} __no_const;
13598
13599 /**
13600 * struct x86_init_timers - platform specific timer setup
13601@@ -102,7 +102,7 @@ struct x86_init_timers {
13602 void (*tsc_pre_init)(void);
13603 void (*timer_init)(void);
13604 void (*wallclock_init)(void);
13605-};
13606+} __no_const;
13607
13608 /**
13609 * struct x86_init_iommu - platform specific iommu setup
13610@@ -110,7 +110,7 @@ struct x86_init_timers {
13611 */
13612 struct x86_init_iommu {
13613 int (*iommu_init)(void);
13614-};
13615+} __no_const;
13616
13617 /**
13618 * struct x86_init_pci - platform specific pci init functions
13619@@ -124,7 +124,7 @@ struct x86_init_pci {
13620 int (*init)(void);
13621 void (*init_irq)(void);
13622 void (*fixup_irqs)(void);
13623-};
13624+} __no_const;
13625
13626 /**
13627 * struct x86_init_ops - functions for platform specific setup
13628@@ -140,7 +140,7 @@ struct x86_init_ops {
13629 struct x86_init_timers timers;
13630 struct x86_init_iommu iommu;
13631 struct x86_init_pci pci;
13632-};
13633+} __no_const;
13634
13635 /**
13636 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13637@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13638 void (*setup_percpu_clockev)(void);
13639 void (*early_percpu_clock_init)(void);
13640 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13641-};
13642+} __no_const;
13643
13644 /**
13645 * struct x86_platform_ops - platform specific runtime functions
13646@@ -177,7 +177,7 @@ struct x86_platform_ops {
13647 int (*i8042_detect)(void);
13648 void (*save_sched_clock_state)(void);
13649 void (*restore_sched_clock_state)(void);
13650-};
13651+} __no_const;
13652
13653 struct pci_dev;
13654
13655@@ -186,7 +186,7 @@ struct x86_msi_ops {
13656 void (*teardown_msi_irq)(unsigned int irq);
13657 void (*teardown_msi_irqs)(struct pci_dev *dev);
13658 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13659-};
13660+} __no_const;
13661
13662 extern struct x86_init_ops x86_init;
13663 extern struct x86_cpuinit_ops x86_cpuinit;
13664diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13665index c6ce245..ffbdab7 100644
13666--- a/arch/x86/include/asm/xsave.h
13667+++ b/arch/x86/include/asm/xsave.h
13668@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13669 {
13670 int err;
13671
13672+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13673+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13674+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13675+#endif
13676+
13677 /*
13678 * Clear the xsave header first, so that reserved fields are
13679 * initialized to zero.
13680@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13681 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13682 {
13683 int err;
13684- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13685+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13686 u32 lmask = mask;
13687 u32 hmask = mask >> 32;
13688
13689+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13690+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13691+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13692+#endif
13693+
13694 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13695 "2:\n"
13696 ".section .fixup,\"ax\"\n"
13697diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13698index 6a564ac..9b1340c 100644
13699--- a/arch/x86/kernel/acpi/realmode/Makefile
13700+++ b/arch/x86/kernel/acpi/realmode/Makefile
13701@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13702 $(call cc-option, -fno-stack-protector) \
13703 $(call cc-option, -mpreferred-stack-boundary=2)
13704 KBUILD_CFLAGS += $(call cc-option, -m32)
13705+ifdef CONSTIFY_PLUGIN
13706+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13707+endif
13708 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13709 GCOV_PROFILE := n
13710
13711diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13712index b4fd836..4358fe3 100644
13713--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13714+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13715@@ -108,6 +108,9 @@ wakeup_code:
13716 /* Do any other stuff... */
13717
13718 #ifndef CONFIG_64BIT
13719+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13720+ call verify_cpu
13721+
13722 /* This could also be done in C code... */
13723 movl pmode_cr3, %eax
13724 movl %eax, %cr3
13725@@ -131,6 +134,7 @@ wakeup_code:
13726 movl pmode_cr0, %eax
13727 movl %eax, %cr0
13728 jmp pmode_return
13729+# include "../../verify_cpu.S"
13730 #else
13731 pushw $0
13732 pushw trampoline_segment
13733diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13734index 146a49c..1b5338b 100644
13735--- a/arch/x86/kernel/acpi/sleep.c
13736+++ b/arch/x86/kernel/acpi/sleep.c
13737@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13738 header->trampoline_segment = trampoline_address() >> 4;
13739 #ifdef CONFIG_SMP
13740 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13741+
13742+ pax_open_kernel();
13743 early_gdt_descr.address =
13744 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13745+ pax_close_kernel();
13746+
13747 initial_gs = per_cpu_offset(smp_processor_id());
13748 #endif
13749 initial_code = (unsigned long)wakeup_long64;
13750diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13751index 7261083..5c12053 100644
13752--- a/arch/x86/kernel/acpi/wakeup_32.S
13753+++ b/arch/x86/kernel/acpi/wakeup_32.S
13754@@ -30,13 +30,11 @@ wakeup_pmode_return:
13755 # and restore the stack ... but you need gdt for this to work
13756 movl saved_context_esp, %esp
13757
13758- movl %cs:saved_magic, %eax
13759- cmpl $0x12345678, %eax
13760+ cmpl $0x12345678, saved_magic
13761 jne bogus_magic
13762
13763 # jump to place where we left off
13764- movl saved_eip, %eax
13765- jmp *%eax
13766+ jmp *(saved_eip)
13767
13768 bogus_magic:
13769 jmp bogus_magic
13770diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13771index 1f84794..e23f862 100644
13772--- a/arch/x86/kernel/alternative.c
13773+++ b/arch/x86/kernel/alternative.c
13774@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13775 */
13776 for (a = start; a < end; a++) {
13777 instr = (u8 *)&a->instr_offset + a->instr_offset;
13778+
13779+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13780+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13781+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13782+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13783+#endif
13784+
13785 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13786 BUG_ON(a->replacementlen > a->instrlen);
13787 BUG_ON(a->instrlen > sizeof(insnbuf));
13788@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13789 for (poff = start; poff < end; poff++) {
13790 u8 *ptr = (u8 *)poff + *poff;
13791
13792+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13793+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13794+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13795+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13796+#endif
13797+
13798 if (!*poff || ptr < text || ptr >= text_end)
13799 continue;
13800 /* turn DS segment override prefix into lock prefix */
13801- if (*ptr == 0x3e)
13802+ if (*ktla_ktva(ptr) == 0x3e)
13803 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13804 };
13805 mutex_unlock(&text_mutex);
13806@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13807 for (poff = start; poff < end; poff++) {
13808 u8 *ptr = (u8 *)poff + *poff;
13809
13810+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13811+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13812+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13813+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13814+#endif
13815+
13816 if (!*poff || ptr < text || ptr >= text_end)
13817 continue;
13818 /* turn lock prefix into DS segment override prefix */
13819- if (*ptr == 0xf0)
13820+ if (*ktla_ktva(ptr) == 0xf0)
13821 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13822 };
13823 mutex_unlock(&text_mutex);
13824@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13825
13826 BUG_ON(p->len > MAX_PATCH_LEN);
13827 /* prep the buffer with the original instructions */
13828- memcpy(insnbuf, p->instr, p->len);
13829+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13830 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13831 (unsigned long)p->instr, p->len);
13832
13833@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13834 if (smp_alt_once)
13835 free_init_pages("SMP alternatives",
13836 (unsigned long)__smp_locks,
13837- (unsigned long)__smp_locks_end);
13838+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13839
13840 restart_nmi();
13841 }
13842@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13843 * instructions. And on the local CPU you need to be protected again NMI or MCE
13844 * handlers seeing an inconsistent instruction while you patch.
13845 */
13846-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13847+void *__kprobes text_poke_early(void *addr, const void *opcode,
13848 size_t len)
13849 {
13850 unsigned long flags;
13851 local_irq_save(flags);
13852- memcpy(addr, opcode, len);
13853+
13854+ pax_open_kernel();
13855+ memcpy(ktla_ktva(addr), opcode, len);
13856 sync_core();
13857+ pax_close_kernel();
13858+
13859 local_irq_restore(flags);
13860 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13861 that causes hangs on some VIA CPUs. */
13862@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13863 */
13864 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13865 {
13866- unsigned long flags;
13867- char *vaddr;
13868+ unsigned char *vaddr = ktla_ktva(addr);
13869 struct page *pages[2];
13870- int i;
13871+ size_t i;
13872
13873 if (!core_kernel_text((unsigned long)addr)) {
13874- pages[0] = vmalloc_to_page(addr);
13875- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13876+ pages[0] = vmalloc_to_page(vaddr);
13877+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13878 } else {
13879- pages[0] = virt_to_page(addr);
13880+ pages[0] = virt_to_page(vaddr);
13881 WARN_ON(!PageReserved(pages[0]));
13882- pages[1] = virt_to_page(addr + PAGE_SIZE);
13883+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13884 }
13885 BUG_ON(!pages[0]);
13886- local_irq_save(flags);
13887- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13888- if (pages[1])
13889- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13890- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13891- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13892- clear_fixmap(FIX_TEXT_POKE0);
13893- if (pages[1])
13894- clear_fixmap(FIX_TEXT_POKE1);
13895- local_flush_tlb();
13896- sync_core();
13897- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13898- that causes hangs on some VIA CPUs. */
13899+ text_poke_early(addr, opcode, len);
13900 for (i = 0; i < len; i++)
13901- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13902- local_irq_restore(flags);
13903+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13904 return addr;
13905 }
13906
13907diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13908index edc2448..553e7c5 100644
13909--- a/arch/x86/kernel/apic/apic.c
13910+++ b/arch/x86/kernel/apic/apic.c
13911@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13912 /*
13913 * Debug level, exported for io_apic.c
13914 */
13915-unsigned int apic_verbosity;
13916+int apic_verbosity;
13917
13918 int pic_mode;
13919
13920@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13921 apic_write(APIC_ESR, 0);
13922 v1 = apic_read(APIC_ESR);
13923 ack_APIC_irq();
13924- atomic_inc(&irq_err_count);
13925+ atomic_inc_unchecked(&irq_err_count);
13926
13927 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13928 smp_processor_id(), v0 , v1);
13929diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13930index e88300d..cd5a87a 100644
13931--- a/arch/x86/kernel/apic/io_apic.c
13932+++ b/arch/x86/kernel/apic/io_apic.c
13933@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13934
13935 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13936 {
13937- io_apic_ops = *ops;
13938+ pax_open_kernel();
13939+ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13940+ pax_close_kernel();
13941 }
13942
13943 /*
13944@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13945 }
13946 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13947
13948-void lock_vector_lock(void)
13949+void lock_vector_lock(void) __acquires(vector_lock)
13950 {
13951 /* Used to the online set of cpus does not change
13952 * during assign_irq_vector.
13953@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13954 raw_spin_lock(&vector_lock);
13955 }
13956
13957-void unlock_vector_lock(void)
13958+void unlock_vector_lock(void) __releases(vector_lock)
13959 {
13960 raw_spin_unlock(&vector_lock);
13961 }
13962@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13963 ack_APIC_irq();
13964 }
13965
13966-atomic_t irq_mis_count;
13967+atomic_unchecked_t irq_mis_count;
13968
13969 #ifdef CONFIG_GENERIC_PENDING_IRQ
13970 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13971@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13972 * at the cpu.
13973 */
13974 if (!(v & (1 << (i & 0x1f)))) {
13975- atomic_inc(&irq_mis_count);
13976+ atomic_inc_unchecked(&irq_mis_count);
13977
13978 eoi_ioapic_irq(irq, cfg);
13979 }
13980diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13981index 459e78c..f037006 100644
13982--- a/arch/x86/kernel/apm_32.c
13983+++ b/arch/x86/kernel/apm_32.c
13984@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13985 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13986 * even though they are called in protected mode.
13987 */
13988-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13989+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13990 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13991
13992 static const char driver_version[] = "1.16ac"; /* no spaces */
13993@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13994 BUG_ON(cpu != 0);
13995 gdt = get_cpu_gdt_table(cpu);
13996 save_desc_40 = gdt[0x40 / 8];
13997+
13998+ pax_open_kernel();
13999 gdt[0x40 / 8] = bad_bios_desc;
14000+ pax_close_kernel();
14001
14002 apm_irq_save(flags);
14003 APM_DO_SAVE_SEGS;
14004@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14005 &call->esi);
14006 APM_DO_RESTORE_SEGS;
14007 apm_irq_restore(flags);
14008+
14009+ pax_open_kernel();
14010 gdt[0x40 / 8] = save_desc_40;
14011+ pax_close_kernel();
14012+
14013 put_cpu();
14014
14015 return call->eax & 0xff;
14016@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14017 BUG_ON(cpu != 0);
14018 gdt = get_cpu_gdt_table(cpu);
14019 save_desc_40 = gdt[0x40 / 8];
14020+
14021+ pax_open_kernel();
14022 gdt[0x40 / 8] = bad_bios_desc;
14023+ pax_close_kernel();
14024
14025 apm_irq_save(flags);
14026 APM_DO_SAVE_SEGS;
14027@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14028 &call->eax);
14029 APM_DO_RESTORE_SEGS;
14030 apm_irq_restore(flags);
14031+
14032+ pax_open_kernel();
14033 gdt[0x40 / 8] = save_desc_40;
14034+ pax_close_kernel();
14035+
14036 put_cpu();
14037 return error;
14038 }
14039@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14040 * code to that CPU.
14041 */
14042 gdt = get_cpu_gdt_table(0);
14043+
14044+ pax_open_kernel();
14045 set_desc_base(&gdt[APM_CS >> 3],
14046 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14047 set_desc_base(&gdt[APM_CS_16 >> 3],
14048 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14049 set_desc_base(&gdt[APM_DS >> 3],
14050 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14051+ pax_close_kernel();
14052
14053 proc_create("apm", 0, NULL, &apm_file_ops);
14054
14055diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14056index 68de2dc..1f3c720 100644
14057--- a/arch/x86/kernel/asm-offsets.c
14058+++ b/arch/x86/kernel/asm-offsets.c
14059@@ -33,6 +33,8 @@ void common(void) {
14060 OFFSET(TI_status, thread_info, status);
14061 OFFSET(TI_addr_limit, thread_info, addr_limit);
14062 OFFSET(TI_preempt_count, thread_info, preempt_count);
14063+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14064+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14065
14066 BLANK();
14067 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14068@@ -53,8 +55,26 @@ void common(void) {
14069 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14070 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14071 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14072+
14073+#ifdef CONFIG_PAX_KERNEXEC
14074+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14075 #endif
14076
14077+#ifdef CONFIG_PAX_MEMORY_UDEREF
14078+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14079+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14080+#ifdef CONFIG_X86_64
14081+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14082+#endif
14083+#endif
14084+
14085+#endif
14086+
14087+ BLANK();
14088+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14089+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14090+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14091+
14092 #ifdef CONFIG_XEN
14093 BLANK();
14094 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14095diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14096index 1b4754f..fbb4227 100644
14097--- a/arch/x86/kernel/asm-offsets_64.c
14098+++ b/arch/x86/kernel/asm-offsets_64.c
14099@@ -76,6 +76,7 @@ int main(void)
14100 BLANK();
14101 #undef ENTRY
14102
14103+ DEFINE(TSS_size, sizeof(struct tss_struct));
14104 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14105 BLANK();
14106
14107diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14108index 6ab6aa2..8f71507 100644
14109--- a/arch/x86/kernel/cpu/Makefile
14110+++ b/arch/x86/kernel/cpu/Makefile
14111@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14112 CFLAGS_REMOVE_perf_event.o = -pg
14113 endif
14114
14115-# Make sure load_percpu_segment has no stackprotector
14116-nostackp := $(call cc-option, -fno-stack-protector)
14117-CFLAGS_common.o := $(nostackp)
14118-
14119 obj-y := intel_cacheinfo.o scattered.o topology.o
14120 obj-y += proc.o capflags.o powerflags.o common.o
14121 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14122diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14123index 146bb62..ac9c74a 100644
14124--- a/arch/x86/kernel/cpu/amd.c
14125+++ b/arch/x86/kernel/cpu/amd.c
14126@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14127 unsigned int size)
14128 {
14129 /* AMD errata T13 (order #21922) */
14130- if ((c->x86 == 6)) {
14131+ if (c->x86 == 6) {
14132 /* Duron Rev A0 */
14133 if (c->x86_model == 3 && c->x86_mask == 0)
14134 size = 64;
14135diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14136index cf79302..b1b28ae 100644
14137--- a/arch/x86/kernel/cpu/common.c
14138+++ b/arch/x86/kernel/cpu/common.c
14139@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14140
14141 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14142
14143-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14144-#ifdef CONFIG_X86_64
14145- /*
14146- * We need valid kernel segments for data and code in long mode too
14147- * IRET will check the segment types kkeil 2000/10/28
14148- * Also sysret mandates a special GDT layout
14149- *
14150- * TLS descriptors are currently at a different place compared to i386.
14151- * Hopefully nobody expects them at a fixed place (Wine?)
14152- */
14153- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14154- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14155- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14156- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14157- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14158- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14159-#else
14160- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14161- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14162- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14163- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14164- /*
14165- * Segments used for calling PnP BIOS have byte granularity.
14166- * They code segments and data segments have fixed 64k limits,
14167- * the transfer segment sizes are set at run time.
14168- */
14169- /* 32-bit code */
14170- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14171- /* 16-bit code */
14172- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14173- /* 16-bit data */
14174- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14175- /* 16-bit data */
14176- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14177- /* 16-bit data */
14178- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14179- /*
14180- * The APM segments have byte granularity and their bases
14181- * are set at run time. All have 64k limits.
14182- */
14183- /* 32-bit code */
14184- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14185- /* 16-bit code */
14186- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14187- /* data */
14188- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14189-
14190- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14191- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14192- GDT_STACK_CANARY_INIT
14193-#endif
14194-} };
14195-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14196-
14197 static int __init x86_xsave_setup(char *s)
14198 {
14199 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14200@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14201 {
14202 struct desc_ptr gdt_descr;
14203
14204- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14205+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14206 gdt_descr.size = GDT_SIZE - 1;
14207 load_gdt(&gdt_descr);
14208 /* Reload the per-cpu base */
14209@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14210 /* Filter out anything that depends on CPUID levels we don't have */
14211 filter_cpuid_features(c, true);
14212
14213+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14214+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14215+#endif
14216+
14217 /* If the model name is still unset, do table lookup. */
14218 if (!c->x86_model_id[0]) {
14219 const char *p;
14220@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14221 }
14222 __setup("clearcpuid=", setup_disablecpuid);
14223
14224+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14225+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14226+
14227 #ifdef CONFIG_X86_64
14228 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14229-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14230- (unsigned long) nmi_idt_table };
14231+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14232
14233 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14234 irq_stack_union) __aligned(PAGE_SIZE);
14235@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14236 EXPORT_PER_CPU_SYMBOL(current_task);
14237
14238 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14239- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14240+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14241 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14242
14243 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14244@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14245 {
14246 memset(regs, 0, sizeof(struct pt_regs));
14247 regs->fs = __KERNEL_PERCPU;
14248- regs->gs = __KERNEL_STACK_CANARY;
14249+ savesegment(gs, regs->gs);
14250
14251 return regs;
14252 }
14253@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14254 int i;
14255
14256 cpu = stack_smp_processor_id();
14257- t = &per_cpu(init_tss, cpu);
14258+ t = init_tss + cpu;
14259 oist = &per_cpu(orig_ist, cpu);
14260
14261 #ifdef CONFIG_NUMA
14262@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14263 switch_to_new_gdt(cpu);
14264 loadsegment(fs, 0);
14265
14266- load_idt((const struct desc_ptr *)&idt_descr);
14267+ load_idt(&idt_descr);
14268
14269 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14270 syscall_init();
14271@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14272 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14273 barrier();
14274
14275- x86_configure_nx();
14276 if (cpu != 0)
14277 enable_x2apic();
14278
14279@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14280 {
14281 int cpu = smp_processor_id();
14282 struct task_struct *curr = current;
14283- struct tss_struct *t = &per_cpu(init_tss, cpu);
14284+ struct tss_struct *t = init_tss + cpu;
14285 struct thread_struct *thread = &curr->thread;
14286
14287 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14288diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14289index 3e6ff6c..54b4992 100644
14290--- a/arch/x86/kernel/cpu/intel.c
14291+++ b/arch/x86/kernel/cpu/intel.c
14292@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14293 * Update the IDT descriptor and reload the IDT so that
14294 * it uses the read-only mapped virtual address.
14295 */
14296- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14297+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14298 load_idt(&idt_descr);
14299 }
14300 #endif
14301diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14302index 61604ae..98250a5 100644
14303--- a/arch/x86/kernel/cpu/mcheck/mce.c
14304+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14305@@ -42,6 +42,7 @@
14306 #include <asm/processor.h>
14307 #include <asm/mce.h>
14308 #include <asm/msr.h>
14309+#include <asm/local.h>
14310
14311 #include "mce-internal.h"
14312
14313@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14314 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14315 m->cs, m->ip);
14316
14317- if (m->cs == __KERNEL_CS)
14318+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14319 print_symbol("{%s}", m->ip);
14320 pr_cont("\n");
14321 }
14322@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14323
14324 #define PANIC_TIMEOUT 5 /* 5 seconds */
14325
14326-static atomic_t mce_paniced;
14327+static atomic_unchecked_t mce_paniced;
14328
14329 static int fake_panic;
14330-static atomic_t mce_fake_paniced;
14331+static atomic_unchecked_t mce_fake_paniced;
14332
14333 /* Panic in progress. Enable interrupts and wait for final IPI */
14334 static void wait_for_panic(void)
14335@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14336 /*
14337 * Make sure only one CPU runs in machine check panic
14338 */
14339- if (atomic_inc_return(&mce_paniced) > 1)
14340+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14341 wait_for_panic();
14342 barrier();
14343
14344@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14345 console_verbose();
14346 } else {
14347 /* Don't log too much for fake panic */
14348- if (atomic_inc_return(&mce_fake_paniced) > 1)
14349+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14350 return;
14351 }
14352 /* First print corrected ones that are still unlogged */
14353@@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14354 * might have been modified by someone else.
14355 */
14356 rmb();
14357- if (atomic_read(&mce_paniced))
14358+ if (atomic_read_unchecked(&mce_paniced))
14359 wait_for_panic();
14360 if (!monarch_timeout)
14361 goto out;
14362@@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14363 }
14364
14365 /* Call the installed machine check handler for this CPU setup. */
14366-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14367+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14368 unexpected_machine_check;
14369
14370 /*
14371@@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14372 return;
14373 }
14374
14375+ pax_open_kernel();
14376 machine_check_vector = do_machine_check;
14377+ pax_close_kernel();
14378
14379 __mcheck_cpu_init_generic();
14380 __mcheck_cpu_init_vendor(c);
14381@@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14382 */
14383
14384 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14385-static int mce_chrdev_open_count; /* #times opened */
14386+static local_t mce_chrdev_open_count; /* #times opened */
14387 static int mce_chrdev_open_exclu; /* already open exclusive? */
14388
14389 static int mce_chrdev_open(struct inode *inode, struct file *file)
14390@@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14391 spin_lock(&mce_chrdev_state_lock);
14392
14393 if (mce_chrdev_open_exclu ||
14394- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14395+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14396 spin_unlock(&mce_chrdev_state_lock);
14397
14398 return -EBUSY;
14399@@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14400
14401 if (file->f_flags & O_EXCL)
14402 mce_chrdev_open_exclu = 1;
14403- mce_chrdev_open_count++;
14404+ local_inc(&mce_chrdev_open_count);
14405
14406 spin_unlock(&mce_chrdev_state_lock);
14407
14408@@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14409 {
14410 spin_lock(&mce_chrdev_state_lock);
14411
14412- mce_chrdev_open_count--;
14413+ local_dec(&mce_chrdev_open_count);
14414 mce_chrdev_open_exclu = 0;
14415
14416 spin_unlock(&mce_chrdev_state_lock);
14417@@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14418 static void mce_reset(void)
14419 {
14420 cpu_missing = 0;
14421- atomic_set(&mce_fake_paniced, 0);
14422+ atomic_set_unchecked(&mce_fake_paniced, 0);
14423 atomic_set(&mce_executing, 0);
14424 atomic_set(&mce_callin, 0);
14425 atomic_set(&global_nwo, 0);
14426diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14427index 2d5454c..51987eb 100644
14428--- a/arch/x86/kernel/cpu/mcheck/p5.c
14429+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14430@@ -11,6 +11,7 @@
14431 #include <asm/processor.h>
14432 #include <asm/mce.h>
14433 #include <asm/msr.h>
14434+#include <asm/pgtable.h>
14435
14436 /* By default disabled */
14437 int mce_p5_enabled __read_mostly;
14438@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14439 if (!cpu_has(c, X86_FEATURE_MCE))
14440 return;
14441
14442+ pax_open_kernel();
14443 machine_check_vector = pentium_machine_check;
14444+ pax_close_kernel();
14445 /* Make sure the vector pointer is visible before we enable MCEs: */
14446 wmb();
14447
14448diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14449index 2d7998f..17c9de1 100644
14450--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14451+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14452@@ -10,6 +10,7 @@
14453 #include <asm/processor.h>
14454 #include <asm/mce.h>
14455 #include <asm/msr.h>
14456+#include <asm/pgtable.h>
14457
14458 /* Machine check handler for WinChip C6: */
14459 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14460@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14461 {
14462 u32 lo, hi;
14463
14464+ pax_open_kernel();
14465 machine_check_vector = winchip_machine_check;
14466+ pax_close_kernel();
14467 /* Make sure the vector pointer is visible before we enable MCEs: */
14468 wmb();
14469
14470diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14471index 6b96110..0da73eb 100644
14472--- a/arch/x86/kernel/cpu/mtrr/main.c
14473+++ b/arch/x86/kernel/cpu/mtrr/main.c
14474@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14475 u64 size_or_mask, size_and_mask;
14476 static bool mtrr_aps_delayed_init;
14477
14478-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14479+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14480
14481 const struct mtrr_ops *mtrr_if;
14482
14483diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14484index df5e41f..816c719 100644
14485--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14486+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14487@@ -25,7 +25,7 @@ struct mtrr_ops {
14488 int (*validate_add_page)(unsigned long base, unsigned long size,
14489 unsigned int type);
14490 int (*have_wrcomb)(void);
14491-};
14492+} __do_const;
14493
14494 extern int generic_get_free_region(unsigned long base, unsigned long size,
14495 int replace_reg);
14496diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14497index bb8e034..fb9020b 100644
14498--- a/arch/x86/kernel/cpu/perf_event.c
14499+++ b/arch/x86/kernel/cpu/perf_event.c
14500@@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14501 break;
14502
14503 perf_callchain_store(entry, frame.return_address);
14504- fp = frame.next_frame;
14505+ fp = (const void __force_user *)frame.next_frame;
14506 }
14507 }
14508
14509diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14510index 13ad899..f642b9a 100644
14511--- a/arch/x86/kernel/crash.c
14512+++ b/arch/x86/kernel/crash.c
14513@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14514 {
14515 #ifdef CONFIG_X86_32
14516 struct pt_regs fixed_regs;
14517-#endif
14518
14519-#ifdef CONFIG_X86_32
14520- if (!user_mode_vm(regs)) {
14521+ if (!user_mode(regs)) {
14522 crash_fixup_ss_esp(&fixed_regs, regs);
14523 regs = &fixed_regs;
14524 }
14525diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14526index 37250fe..bf2ec74 100644
14527--- a/arch/x86/kernel/doublefault_32.c
14528+++ b/arch/x86/kernel/doublefault_32.c
14529@@ -11,7 +11,7 @@
14530
14531 #define DOUBLEFAULT_STACKSIZE (1024)
14532 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14533-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14534+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14535
14536 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14537
14538@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14539 unsigned long gdt, tss;
14540
14541 store_gdt(&gdt_desc);
14542- gdt = gdt_desc.address;
14543+ gdt = (unsigned long)gdt_desc.address;
14544
14545 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14546
14547@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14548 /* 0x2 bit is always set */
14549 .flags = X86_EFLAGS_SF | 0x2,
14550 .sp = STACK_START,
14551- .es = __USER_DS,
14552+ .es = __KERNEL_DS,
14553 .cs = __KERNEL_CS,
14554 .ss = __KERNEL_DS,
14555- .ds = __USER_DS,
14556+ .ds = __KERNEL_DS,
14557 .fs = __KERNEL_PERCPU,
14558
14559 .__cr3 = __pa_nodebug(swapper_pg_dir),
14560diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14561index 1b81839..0b4e7b0 100644
14562--- a/arch/x86/kernel/dumpstack.c
14563+++ b/arch/x86/kernel/dumpstack.c
14564@@ -2,6 +2,9 @@
14565 * Copyright (C) 1991, 1992 Linus Torvalds
14566 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14567 */
14568+#ifdef CONFIG_GRKERNSEC_HIDESYM
14569+#define __INCLUDED_BY_HIDESYM 1
14570+#endif
14571 #include <linux/kallsyms.h>
14572 #include <linux/kprobes.h>
14573 #include <linux/uaccess.h>
14574@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14575 static void
14576 print_ftrace_graph_addr(unsigned long addr, void *data,
14577 const struct stacktrace_ops *ops,
14578- struct thread_info *tinfo, int *graph)
14579+ struct task_struct *task, int *graph)
14580 {
14581- struct task_struct *task;
14582 unsigned long ret_addr;
14583 int index;
14584
14585 if (addr != (unsigned long)return_to_handler)
14586 return;
14587
14588- task = tinfo->task;
14589 index = task->curr_ret_stack;
14590
14591 if (!task->ret_stack || index < *graph)
14592@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14593 static inline void
14594 print_ftrace_graph_addr(unsigned long addr, void *data,
14595 const struct stacktrace_ops *ops,
14596- struct thread_info *tinfo, int *graph)
14597+ struct task_struct *task, int *graph)
14598 { }
14599 #endif
14600
14601@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14602 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14603 */
14604
14605-static inline int valid_stack_ptr(struct thread_info *tinfo,
14606- void *p, unsigned int size, void *end)
14607+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14608 {
14609- void *t = tinfo;
14610 if (end) {
14611 if (p < end && p >= (end-THREAD_SIZE))
14612 return 1;
14613@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14614 }
14615
14616 unsigned long
14617-print_context_stack(struct thread_info *tinfo,
14618+print_context_stack(struct task_struct *task, void *stack_start,
14619 unsigned long *stack, unsigned long bp,
14620 const struct stacktrace_ops *ops, void *data,
14621 unsigned long *end, int *graph)
14622 {
14623 struct stack_frame *frame = (struct stack_frame *)bp;
14624
14625- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14626+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14627 unsigned long addr;
14628
14629 addr = *stack;
14630@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14631 } else {
14632 ops->address(data, addr, 0);
14633 }
14634- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14635+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14636 }
14637 stack++;
14638 }
14639@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14640 EXPORT_SYMBOL_GPL(print_context_stack);
14641
14642 unsigned long
14643-print_context_stack_bp(struct thread_info *tinfo,
14644+print_context_stack_bp(struct task_struct *task, void *stack_start,
14645 unsigned long *stack, unsigned long bp,
14646 const struct stacktrace_ops *ops, void *data,
14647 unsigned long *end, int *graph)
14648@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14649 struct stack_frame *frame = (struct stack_frame *)bp;
14650 unsigned long *ret_addr = &frame->return_address;
14651
14652- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14653+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14654 unsigned long addr = *ret_addr;
14655
14656 if (!__kernel_text_address(addr))
14657@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14658 ops->address(data, addr, 1);
14659 frame = frame->next_frame;
14660 ret_addr = &frame->return_address;
14661- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14662+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14663 }
14664
14665 return (unsigned long)frame;
14666@@ -189,7 +188,7 @@ void dump_stack(void)
14667
14668 bp = stack_frame(current, NULL);
14669 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14670- current->pid, current->comm, print_tainted(),
14671+ task_pid_nr(current), current->comm, print_tainted(),
14672 init_utsname()->release,
14673 (int)strcspn(init_utsname()->version, " "),
14674 init_utsname()->version);
14675@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14676 }
14677 EXPORT_SYMBOL_GPL(oops_begin);
14678
14679+extern void gr_handle_kernel_exploit(void);
14680+
14681 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14682 {
14683 if (regs && kexec_should_crash(current))
14684@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 panic("Fatal exception in interrupt");
14686 if (panic_on_oops)
14687 panic("Fatal exception");
14688- do_exit(signr);
14689+
14690+ gr_handle_kernel_exploit();
14691+
14692+ do_group_exit(signr);
14693 }
14694
14695 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14696@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14697
14698 show_registers(regs);
14699 #ifdef CONFIG_X86_32
14700- if (user_mode_vm(regs)) {
14701+ if (user_mode(regs)) {
14702 sp = regs->sp;
14703 ss = regs->ss & 0xffff;
14704 } else {
14705@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14706 unsigned long flags = oops_begin();
14707 int sig = SIGSEGV;
14708
14709- if (!user_mode_vm(regs))
14710+ if (!user_mode(regs))
14711 report_bug(regs->ip, regs);
14712
14713 if (__die(str, regs, err))
14714diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14715index 88ec912..e95e935 100644
14716--- a/arch/x86/kernel/dumpstack_32.c
14717+++ b/arch/x86/kernel/dumpstack_32.c
14718@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14719 bp = stack_frame(task, regs);
14720
14721 for (;;) {
14722- struct thread_info *context;
14723+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14724
14725- context = (struct thread_info *)
14726- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14727- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14728+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14729
14730- stack = (unsigned long *)context->previous_esp;
14731- if (!stack)
14732+ if (stack_start == task_stack_page(task))
14733 break;
14734+ stack = *(unsigned long **)stack_start;
14735 if (ops->stack(data, "IRQ") < 0)
14736 break;
14737 touch_nmi_watchdog();
14738@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14739 int i;
14740
14741 print_modules();
14742- __show_regs(regs, !user_mode_vm(regs));
14743+ __show_regs(regs, !user_mode(regs));
14744
14745 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14746 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14747@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14748 * When in-kernel, we also print out the stack and code at the
14749 * time of the fault..
14750 */
14751- if (!user_mode_vm(regs)) {
14752+ if (!user_mode(regs)) {
14753 unsigned int code_prologue = code_bytes * 43 / 64;
14754 unsigned int code_len = code_bytes;
14755 unsigned char c;
14756 u8 *ip;
14757+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14758
14759 printk(KERN_EMERG "Stack:\n");
14760 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14761
14762 printk(KERN_EMERG "Code: ");
14763
14764- ip = (u8 *)regs->ip - code_prologue;
14765+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14766 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14767 /* try starting at IP */
14768- ip = (u8 *)regs->ip;
14769+ ip = (u8 *)regs->ip + cs_base;
14770 code_len = code_len - code_prologue + 1;
14771 }
14772 for (i = 0; i < code_len; i++, ip++) {
14773@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14774 printk(KERN_CONT " Bad EIP value.");
14775 break;
14776 }
14777- if (ip == (u8 *)regs->ip)
14778+ if (ip == (u8 *)regs->ip + cs_base)
14779 printk(KERN_CONT "<%02x> ", c);
14780 else
14781 printk(KERN_CONT "%02x ", c);
14782@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14783 {
14784 unsigned short ud2;
14785
14786+ ip = ktla_ktva(ip);
14787 if (ip < PAGE_OFFSET)
14788 return 0;
14789 if (probe_kernel_address((unsigned short *)ip, ud2))
14790@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14791
14792 return ud2 == 0x0b0f;
14793 }
14794+
14795+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14796+void pax_check_alloca(unsigned long size)
14797+{
14798+ unsigned long sp = (unsigned long)&sp, stack_left;
14799+
14800+ /* all kernel stacks are of the same size */
14801+ stack_left = sp & (THREAD_SIZE - 1);
14802+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14803+}
14804+EXPORT_SYMBOL(pax_check_alloca);
14805+#endif
14806diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14807index 17107bd..9623722 100644
14808--- a/arch/x86/kernel/dumpstack_64.c
14809+++ b/arch/x86/kernel/dumpstack_64.c
14810@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14811 unsigned long *irq_stack_end =
14812 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14813 unsigned used = 0;
14814- struct thread_info *tinfo;
14815 int graph = 0;
14816 unsigned long dummy;
14817+ void *stack_start;
14818
14819 if (!task)
14820 task = current;
14821@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14822 * current stack address. If the stacks consist of nested
14823 * exceptions
14824 */
14825- tinfo = task_thread_info(task);
14826 for (;;) {
14827 char *id;
14828 unsigned long *estack_end;
14829+
14830 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14831 &used, &id);
14832
14833@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14834 if (ops->stack(data, id) < 0)
14835 break;
14836
14837- bp = ops->walk_stack(tinfo, stack, bp, ops,
14838+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14839 data, estack_end, &graph);
14840 ops->stack(data, "<EOE>");
14841 /*
14842@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14843 * second-to-last pointer (index -2 to end) in the
14844 * exception stack:
14845 */
14846+ if ((u16)estack_end[-1] != __KERNEL_DS)
14847+ goto out;
14848 stack = (unsigned long *) estack_end[-2];
14849 continue;
14850 }
14851@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14852 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14853 if (ops->stack(data, "IRQ") < 0)
14854 break;
14855- bp = ops->walk_stack(tinfo, stack, bp,
14856+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14857 ops, data, irq_stack_end, &graph);
14858 /*
14859 * We link to the next stack (which would be
14860@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14861 /*
14862 * This handles the process stack:
14863 */
14864- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14865+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14866+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14867+out:
14868 put_cpu();
14869 }
14870 EXPORT_SYMBOL(dump_trace);
14871@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14872
14873 return ud2 == 0x0b0f;
14874 }
14875+
14876+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14877+void pax_check_alloca(unsigned long size)
14878+{
14879+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14880+ unsigned cpu, used;
14881+ char *id;
14882+
14883+ /* check the process stack first */
14884+ stack_start = (unsigned long)task_stack_page(current);
14885+ stack_end = stack_start + THREAD_SIZE;
14886+ if (likely(stack_start <= sp && sp < stack_end)) {
14887+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14888+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14889+ return;
14890+ }
14891+
14892+ cpu = get_cpu();
14893+
14894+ /* check the irq stacks */
14895+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14896+ stack_start = stack_end - IRQ_STACK_SIZE;
14897+ if (stack_start <= sp && sp < stack_end) {
14898+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14899+ put_cpu();
14900+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14901+ return;
14902+ }
14903+
14904+ /* check the exception stacks */
14905+ used = 0;
14906+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14907+ stack_start = stack_end - EXCEPTION_STKSZ;
14908+ if (stack_end && stack_start <= sp && sp < stack_end) {
14909+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14910+ put_cpu();
14911+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14912+ return;
14913+ }
14914+
14915+ put_cpu();
14916+
14917+ /* unknown stack */
14918+ BUG();
14919+}
14920+EXPORT_SYMBOL(pax_check_alloca);
14921+#endif
14922diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14923index 9b9f18b..9fcaa04 100644
14924--- a/arch/x86/kernel/early_printk.c
14925+++ b/arch/x86/kernel/early_printk.c
14926@@ -7,6 +7,7 @@
14927 #include <linux/pci_regs.h>
14928 #include <linux/pci_ids.h>
14929 #include <linux/errno.h>
14930+#include <linux/sched.h>
14931 #include <asm/io.h>
14932 #include <asm/processor.h>
14933 #include <asm/fcntl.h>
14934diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14935index 7b784f4..db6b628 100644
14936--- a/arch/x86/kernel/entry_32.S
14937+++ b/arch/x86/kernel/entry_32.S
14938@@ -179,13 +179,146 @@
14939 /*CFI_REL_OFFSET gs, PT_GS*/
14940 .endm
14941 .macro SET_KERNEL_GS reg
14942+
14943+#ifdef CONFIG_CC_STACKPROTECTOR
14944 movl $(__KERNEL_STACK_CANARY), \reg
14945+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14946+ movl $(__USER_DS), \reg
14947+#else
14948+ xorl \reg, \reg
14949+#endif
14950+
14951 movl \reg, %gs
14952 .endm
14953
14954 #endif /* CONFIG_X86_32_LAZY_GS */
14955
14956-.macro SAVE_ALL
14957+.macro pax_enter_kernel
14958+#ifdef CONFIG_PAX_KERNEXEC
14959+ call pax_enter_kernel
14960+#endif
14961+.endm
14962+
14963+.macro pax_exit_kernel
14964+#ifdef CONFIG_PAX_KERNEXEC
14965+ call pax_exit_kernel
14966+#endif
14967+.endm
14968+
14969+#ifdef CONFIG_PAX_KERNEXEC
14970+ENTRY(pax_enter_kernel)
14971+#ifdef CONFIG_PARAVIRT
14972+ pushl %eax
14973+ pushl %ecx
14974+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14975+ mov %eax, %esi
14976+#else
14977+ mov %cr0, %esi
14978+#endif
14979+ bts $16, %esi
14980+ jnc 1f
14981+ mov %cs, %esi
14982+ cmp $__KERNEL_CS, %esi
14983+ jz 3f
14984+ ljmp $__KERNEL_CS, $3f
14985+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14986+2:
14987+#ifdef CONFIG_PARAVIRT
14988+ mov %esi, %eax
14989+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14990+#else
14991+ mov %esi, %cr0
14992+#endif
14993+3:
14994+#ifdef CONFIG_PARAVIRT
14995+ popl %ecx
14996+ popl %eax
14997+#endif
14998+ ret
14999+ENDPROC(pax_enter_kernel)
15000+
15001+ENTRY(pax_exit_kernel)
15002+#ifdef CONFIG_PARAVIRT
15003+ pushl %eax
15004+ pushl %ecx
15005+#endif
15006+ mov %cs, %esi
15007+ cmp $__KERNEXEC_KERNEL_CS, %esi
15008+ jnz 2f
15009+#ifdef CONFIG_PARAVIRT
15010+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15011+ mov %eax, %esi
15012+#else
15013+ mov %cr0, %esi
15014+#endif
15015+ btr $16, %esi
15016+ ljmp $__KERNEL_CS, $1f
15017+1:
15018+#ifdef CONFIG_PARAVIRT
15019+ mov %esi, %eax
15020+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15021+#else
15022+ mov %esi, %cr0
15023+#endif
15024+2:
15025+#ifdef CONFIG_PARAVIRT
15026+ popl %ecx
15027+ popl %eax
15028+#endif
15029+ ret
15030+ENDPROC(pax_exit_kernel)
15031+#endif
15032+
15033+.macro pax_erase_kstack
15034+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15035+ call pax_erase_kstack
15036+#endif
15037+.endm
15038+
15039+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15040+/*
15041+ * ebp: thread_info
15042+ * ecx, edx: can be clobbered
15043+ */
15044+ENTRY(pax_erase_kstack)
15045+ pushl %edi
15046+ pushl %eax
15047+
15048+ mov TI_lowest_stack(%ebp), %edi
15049+ mov $-0xBEEF, %eax
15050+ std
15051+
15052+1: mov %edi, %ecx
15053+ and $THREAD_SIZE_asm - 1, %ecx
15054+ shr $2, %ecx
15055+ repne scasl
15056+ jecxz 2f
15057+
15058+ cmp $2*16, %ecx
15059+ jc 2f
15060+
15061+ mov $2*16, %ecx
15062+ repe scasl
15063+ jecxz 2f
15064+ jne 1b
15065+
15066+2: cld
15067+ mov %esp, %ecx
15068+ sub %edi, %ecx
15069+ shr $2, %ecx
15070+ rep stosl
15071+
15072+ mov TI_task_thread_sp0(%ebp), %edi
15073+ sub $128, %edi
15074+ mov %edi, TI_lowest_stack(%ebp)
15075+
15076+ popl %eax
15077+ popl %edi
15078+ ret
15079+ENDPROC(pax_erase_kstack)
15080+#endif
15081+
15082+.macro __SAVE_ALL _DS
15083 cld
15084 PUSH_GS
15085 pushl_cfi %fs
15086@@ -208,7 +341,7 @@
15087 CFI_REL_OFFSET ecx, 0
15088 pushl_cfi %ebx
15089 CFI_REL_OFFSET ebx, 0
15090- movl $(__USER_DS), %edx
15091+ movl $\_DS, %edx
15092 movl %edx, %ds
15093 movl %edx, %es
15094 movl $(__KERNEL_PERCPU), %edx
15095@@ -216,6 +349,15 @@
15096 SET_KERNEL_GS %edx
15097 .endm
15098
15099+.macro SAVE_ALL
15100+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15101+ __SAVE_ALL __KERNEL_DS
15102+ pax_enter_kernel
15103+#else
15104+ __SAVE_ALL __USER_DS
15105+#endif
15106+.endm
15107+
15108 .macro RESTORE_INT_REGS
15109 popl_cfi %ebx
15110 CFI_RESTORE ebx
15111@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15112 popfl_cfi
15113 jmp syscall_exit
15114 CFI_ENDPROC
15115-END(ret_from_fork)
15116+ENDPROC(ret_from_fork)
15117
15118 /*
15119 * Interrupt exit functions should be protected against kprobes
15120@@ -335,7 +477,15 @@ resume_userspace_sig:
15121 andl $SEGMENT_RPL_MASK, %eax
15122 #endif
15123 cmpl $USER_RPL, %eax
15124+
15125+#ifdef CONFIG_PAX_KERNEXEC
15126+ jae resume_userspace
15127+
15128+ pax_exit_kernel
15129+ jmp resume_kernel
15130+#else
15131 jb resume_kernel # not returning to v8086 or userspace
15132+#endif
15133
15134 ENTRY(resume_userspace)
15135 LOCKDEP_SYS_EXIT
15136@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15137 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15138 # int/exception return?
15139 jne work_pending
15140- jmp restore_all
15141-END(ret_from_exception)
15142+ jmp restore_all_pax
15143+ENDPROC(ret_from_exception)
15144
15145 #ifdef CONFIG_PREEMPT
15146 ENTRY(resume_kernel)
15147@@ -363,7 +513,7 @@ need_resched:
15148 jz restore_all
15149 call preempt_schedule_irq
15150 jmp need_resched
15151-END(resume_kernel)
15152+ENDPROC(resume_kernel)
15153 #endif
15154 CFI_ENDPROC
15155 /*
15156@@ -397,23 +547,34 @@ sysenter_past_esp:
15157 /*CFI_REL_OFFSET cs, 0*/
15158 /*
15159 * Push current_thread_info()->sysenter_return to the stack.
15160- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15161- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15162 */
15163- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15164+ pushl_cfi $0
15165 CFI_REL_OFFSET eip, 0
15166
15167 pushl_cfi %eax
15168 SAVE_ALL
15169+ GET_THREAD_INFO(%ebp)
15170+ movl TI_sysenter_return(%ebp),%ebp
15171+ movl %ebp,PT_EIP(%esp)
15172 ENABLE_INTERRUPTS(CLBR_NONE)
15173
15174 /*
15175 * Load the potential sixth argument from user stack.
15176 * Careful about security.
15177 */
15178+ movl PT_OLDESP(%esp),%ebp
15179+
15180+#ifdef CONFIG_PAX_MEMORY_UDEREF
15181+ mov PT_OLDSS(%esp),%ds
15182+1: movl %ds:(%ebp),%ebp
15183+ push %ss
15184+ pop %ds
15185+#else
15186 cmpl $__PAGE_OFFSET-3,%ebp
15187 jae syscall_fault
15188 1: movl (%ebp),%ebp
15189+#endif
15190+
15191 movl %ebp,PT_EBP(%esp)
15192 .section __ex_table,"a"
15193 .align 4
15194@@ -436,12 +597,24 @@ sysenter_do_call:
15195 testl $_TIF_ALLWORK_MASK, %ecx
15196 jne sysexit_audit
15197 sysenter_exit:
15198+
15199+#ifdef CONFIG_PAX_RANDKSTACK
15200+ pushl_cfi %eax
15201+ movl %esp, %eax
15202+ call pax_randomize_kstack
15203+ popl_cfi %eax
15204+#endif
15205+
15206+ pax_erase_kstack
15207+
15208 /* if something modifies registers it must also disable sysexit */
15209 movl PT_EIP(%esp), %edx
15210 movl PT_OLDESP(%esp), %ecx
15211 xorl %ebp,%ebp
15212 TRACE_IRQS_ON
15213 1: mov PT_FS(%esp), %fs
15214+2: mov PT_DS(%esp), %ds
15215+3: mov PT_ES(%esp), %es
15216 PTGS_TO_GS
15217 ENABLE_INTERRUPTS_SYSEXIT
15218
15219@@ -458,6 +631,9 @@ sysenter_audit:
15220 movl %eax,%edx /* 2nd arg: syscall number */
15221 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15222 call __audit_syscall_entry
15223+
15224+ pax_erase_kstack
15225+
15226 pushl_cfi %ebx
15227 movl PT_EAX(%esp),%eax /* reload syscall number */
15228 jmp sysenter_do_call
15229@@ -483,11 +659,17 @@ sysexit_audit:
15230
15231 CFI_ENDPROC
15232 .pushsection .fixup,"ax"
15233-2: movl $0,PT_FS(%esp)
15234+4: movl $0,PT_FS(%esp)
15235+ jmp 1b
15236+5: movl $0,PT_DS(%esp)
15237+ jmp 1b
15238+6: movl $0,PT_ES(%esp)
15239 jmp 1b
15240 .section __ex_table,"a"
15241 .align 4
15242- .long 1b,2b
15243+ .long 1b,4b
15244+ .long 2b,5b
15245+ .long 3b,6b
15246 .popsection
15247 PTGS_TO_GS_EX
15248 ENDPROC(ia32_sysenter_target)
15249@@ -520,6 +702,15 @@ syscall_exit:
15250 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15251 jne syscall_exit_work
15252
15253+restore_all_pax:
15254+
15255+#ifdef CONFIG_PAX_RANDKSTACK
15256+ movl %esp, %eax
15257+ call pax_randomize_kstack
15258+#endif
15259+
15260+ pax_erase_kstack
15261+
15262 restore_all:
15263 TRACE_IRQS_IRET
15264 restore_all_notrace:
15265@@ -579,14 +770,34 @@ ldt_ss:
15266 * compensating for the offset by changing to the ESPFIX segment with
15267 * a base address that matches for the difference.
15268 */
15269-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15270+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15271 mov %esp, %edx /* load kernel esp */
15272 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15273 mov %dx, %ax /* eax: new kernel esp */
15274 sub %eax, %edx /* offset (low word is 0) */
15275+#ifdef CONFIG_SMP
15276+ movl PER_CPU_VAR(cpu_number), %ebx
15277+ shll $PAGE_SHIFT_asm, %ebx
15278+ addl $cpu_gdt_table, %ebx
15279+#else
15280+ movl $cpu_gdt_table, %ebx
15281+#endif
15282 shr $16, %edx
15283- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15284- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15285+
15286+#ifdef CONFIG_PAX_KERNEXEC
15287+ mov %cr0, %esi
15288+ btr $16, %esi
15289+ mov %esi, %cr0
15290+#endif
15291+
15292+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15293+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15294+
15295+#ifdef CONFIG_PAX_KERNEXEC
15296+ bts $16, %esi
15297+ mov %esi, %cr0
15298+#endif
15299+
15300 pushl_cfi $__ESPFIX_SS
15301 pushl_cfi %eax /* new kernel esp */
15302 /* Disable interrupts, but do not irqtrace this section: we
15303@@ -615,38 +826,30 @@ work_resched:
15304 movl TI_flags(%ebp), %ecx
15305 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15306 # than syscall tracing?
15307- jz restore_all
15308+ jz restore_all_pax
15309 testb $_TIF_NEED_RESCHED, %cl
15310 jnz work_resched
15311
15312 work_notifysig: # deal with pending signals and
15313 # notify-resume requests
15314+ movl %esp, %eax
15315 #ifdef CONFIG_VM86
15316 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15317- movl %esp, %eax
15318- jne work_notifysig_v86 # returning to kernel-space or
15319+ jz 1f # returning to kernel-space or
15320 # vm86-space
15321- TRACE_IRQS_ON
15322- ENABLE_INTERRUPTS(CLBR_NONE)
15323- xorl %edx, %edx
15324- call do_notify_resume
15325- jmp resume_userspace_sig
15326
15327- ALIGN
15328-work_notifysig_v86:
15329 pushl_cfi %ecx # save ti_flags for do_notify_resume
15330 call save_v86_state # %eax contains pt_regs pointer
15331 popl_cfi %ecx
15332 movl %eax, %esp
15333-#else
15334- movl %esp, %eax
15335+1:
15336 #endif
15337 TRACE_IRQS_ON
15338 ENABLE_INTERRUPTS(CLBR_NONE)
15339 xorl %edx, %edx
15340 call do_notify_resume
15341 jmp resume_userspace_sig
15342-END(work_pending)
15343+ENDPROC(work_pending)
15344
15345 # perform syscall exit tracing
15346 ALIGN
15347@@ -654,11 +857,14 @@ syscall_trace_entry:
15348 movl $-ENOSYS,PT_EAX(%esp)
15349 movl %esp, %eax
15350 call syscall_trace_enter
15351+
15352+ pax_erase_kstack
15353+
15354 /* What it returned is what we'll actually use. */
15355 cmpl $(NR_syscalls), %eax
15356 jnae syscall_call
15357 jmp syscall_exit
15358-END(syscall_trace_entry)
15359+ENDPROC(syscall_trace_entry)
15360
15361 # perform syscall exit tracing
15362 ALIGN
15363@@ -671,20 +877,24 @@ syscall_exit_work:
15364 movl %esp, %eax
15365 call syscall_trace_leave
15366 jmp resume_userspace
15367-END(syscall_exit_work)
15368+ENDPROC(syscall_exit_work)
15369 CFI_ENDPROC
15370
15371 RING0_INT_FRAME # can't unwind into user space anyway
15372 syscall_fault:
15373+#ifdef CONFIG_PAX_MEMORY_UDEREF
15374+ push %ss
15375+ pop %ds
15376+#endif
15377 GET_THREAD_INFO(%ebp)
15378 movl $-EFAULT,PT_EAX(%esp)
15379 jmp resume_userspace
15380-END(syscall_fault)
15381+ENDPROC(syscall_fault)
15382
15383 syscall_badsys:
15384 movl $-ENOSYS,PT_EAX(%esp)
15385 jmp resume_userspace
15386-END(syscall_badsys)
15387+ENDPROC(syscall_badsys)
15388 CFI_ENDPROC
15389 /*
15390 * End of kprobes section
15391@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15392 CFI_ENDPROC
15393 ENDPROC(ptregs_clone)
15394
15395+ ALIGN;
15396+ENTRY(kernel_execve)
15397+ CFI_STARTPROC
15398+ pushl_cfi %ebp
15399+ sub $PT_OLDSS+4,%esp
15400+ pushl_cfi %edi
15401+ pushl_cfi %ecx
15402+ pushl_cfi %eax
15403+ lea 3*4(%esp),%edi
15404+ mov $PT_OLDSS/4+1,%ecx
15405+ xorl %eax,%eax
15406+ rep stosl
15407+ popl_cfi %eax
15408+ popl_cfi %ecx
15409+ popl_cfi %edi
15410+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15411+ pushl_cfi %esp
15412+ call sys_execve
15413+ add $4,%esp
15414+ CFI_ADJUST_CFA_OFFSET -4
15415+ GET_THREAD_INFO(%ebp)
15416+ test %eax,%eax
15417+ jz syscall_exit
15418+ add $PT_OLDSS+4,%esp
15419+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15420+ popl_cfi %ebp
15421+ ret
15422+ CFI_ENDPROC
15423+ENDPROC(kernel_execve)
15424+
15425 .macro FIXUP_ESPFIX_STACK
15426 /*
15427 * Switch back for ESPFIX stack to the normal zerobased stack
15428@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15429 * normal stack and adjusts ESP with the matching offset.
15430 */
15431 /* fixup the stack */
15432- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15433- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15434+#ifdef CONFIG_SMP
15435+ movl PER_CPU_VAR(cpu_number), %ebx
15436+ shll $PAGE_SHIFT_asm, %ebx
15437+ addl $cpu_gdt_table, %ebx
15438+#else
15439+ movl $cpu_gdt_table, %ebx
15440+#endif
15441+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15442+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15443 shl $16, %eax
15444 addl %esp, %eax /* the adjusted stack pointer */
15445 pushl_cfi $__KERNEL_DS
15446@@ -819,7 +1066,7 @@ vector=vector+1
15447 .endr
15448 2: jmp common_interrupt
15449 .endr
15450-END(irq_entries_start)
15451+ENDPROC(irq_entries_start)
15452
15453 .previous
15454 END(interrupt)
15455@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15456 pushl_cfi $do_coprocessor_error
15457 jmp error_code
15458 CFI_ENDPROC
15459-END(coprocessor_error)
15460+ENDPROC(coprocessor_error)
15461
15462 ENTRY(simd_coprocessor_error)
15463 RING0_INT_FRAME
15464@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15465 #endif
15466 jmp error_code
15467 CFI_ENDPROC
15468-END(simd_coprocessor_error)
15469+ENDPROC(simd_coprocessor_error)
15470
15471 ENTRY(device_not_available)
15472 RING0_INT_FRAME
15473@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15474 pushl_cfi $do_device_not_available
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(device_not_available)
15478+ENDPROC(device_not_available)
15479
15480 #ifdef CONFIG_PARAVIRT
15481 ENTRY(native_iret)
15482@@ -905,12 +1152,12 @@ ENTRY(native_iret)
15483 .align 4
15484 .long native_iret, iret_exc
15485 .previous
15486-END(native_iret)
15487+ENDPROC(native_iret)
15488
15489 ENTRY(native_irq_enable_sysexit)
15490 sti
15491 sysexit
15492-END(native_irq_enable_sysexit)
15493+ENDPROC(native_irq_enable_sysexit)
15494 #endif
15495
15496 ENTRY(overflow)
15497@@ -919,7 +1166,7 @@ ENTRY(overflow)
15498 pushl_cfi $do_overflow
15499 jmp error_code
15500 CFI_ENDPROC
15501-END(overflow)
15502+ENDPROC(overflow)
15503
15504 ENTRY(bounds)
15505 RING0_INT_FRAME
15506@@ -927,7 +1174,7 @@ ENTRY(bounds)
15507 pushl_cfi $do_bounds
15508 jmp error_code
15509 CFI_ENDPROC
15510-END(bounds)
15511+ENDPROC(bounds)
15512
15513 ENTRY(invalid_op)
15514 RING0_INT_FRAME
15515@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15516 pushl_cfi $do_invalid_op
15517 jmp error_code
15518 CFI_ENDPROC
15519-END(invalid_op)
15520+ENDPROC(invalid_op)
15521
15522 ENTRY(coprocessor_segment_overrun)
15523 RING0_INT_FRAME
15524@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15525 pushl_cfi $do_coprocessor_segment_overrun
15526 jmp error_code
15527 CFI_ENDPROC
15528-END(coprocessor_segment_overrun)
15529+ENDPROC(coprocessor_segment_overrun)
15530
15531 ENTRY(invalid_TSS)
15532 RING0_EC_FRAME
15533 pushl_cfi $do_invalid_TSS
15534 jmp error_code
15535 CFI_ENDPROC
15536-END(invalid_TSS)
15537+ENDPROC(invalid_TSS)
15538
15539 ENTRY(segment_not_present)
15540 RING0_EC_FRAME
15541 pushl_cfi $do_segment_not_present
15542 jmp error_code
15543 CFI_ENDPROC
15544-END(segment_not_present)
15545+ENDPROC(segment_not_present)
15546
15547 ENTRY(stack_segment)
15548 RING0_EC_FRAME
15549 pushl_cfi $do_stack_segment
15550 jmp error_code
15551 CFI_ENDPROC
15552-END(stack_segment)
15553+ENDPROC(stack_segment)
15554
15555 ENTRY(alignment_check)
15556 RING0_EC_FRAME
15557 pushl_cfi $do_alignment_check
15558 jmp error_code
15559 CFI_ENDPROC
15560-END(alignment_check)
15561+ENDPROC(alignment_check)
15562
15563 ENTRY(divide_error)
15564 RING0_INT_FRAME
15565@@ -979,7 +1226,7 @@ ENTRY(divide_error)
15566 pushl_cfi $do_divide_error
15567 jmp error_code
15568 CFI_ENDPROC
15569-END(divide_error)
15570+ENDPROC(divide_error)
15571
15572 #ifdef CONFIG_X86_MCE
15573 ENTRY(machine_check)
15574@@ -988,7 +1235,7 @@ ENTRY(machine_check)
15575 pushl_cfi machine_check_vector
15576 jmp error_code
15577 CFI_ENDPROC
15578-END(machine_check)
15579+ENDPROC(machine_check)
15580 #endif
15581
15582 ENTRY(spurious_interrupt_bug)
15583@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15584 pushl_cfi $do_spurious_interrupt_bug
15585 jmp error_code
15586 CFI_ENDPROC
15587-END(spurious_interrupt_bug)
15588+ENDPROC(spurious_interrupt_bug)
15589 /*
15590 * End of kprobes section
15591 */
15592@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15593
15594 ENTRY(mcount)
15595 ret
15596-END(mcount)
15597+ENDPROC(mcount)
15598
15599 ENTRY(ftrace_caller)
15600 cmpl $0, function_trace_stop
15601@@ -1141,7 +1388,7 @@ ftrace_graph_call:
15602 .globl ftrace_stub
15603 ftrace_stub:
15604 ret
15605-END(ftrace_caller)
15606+ENDPROC(ftrace_caller)
15607
15608 #else /* ! CONFIG_DYNAMIC_FTRACE */
15609
15610@@ -1177,7 +1424,7 @@ trace:
15611 popl %ecx
15612 popl %eax
15613 jmp ftrace_stub
15614-END(mcount)
15615+ENDPROC(mcount)
15616 #endif /* CONFIG_DYNAMIC_FTRACE */
15617 #endif /* CONFIG_FUNCTION_TRACER */
15618
15619@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15620 popl %ecx
15621 popl %eax
15622 ret
15623-END(ftrace_graph_caller)
15624+ENDPROC(ftrace_graph_caller)
15625
15626 .globl return_to_handler
15627 return_to_handler:
15628@@ -1253,15 +1500,18 @@ error_code:
15629 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15630 REG_TO_PTGS %ecx
15631 SET_KERNEL_GS %ecx
15632- movl $(__USER_DS), %ecx
15633+ movl $(__KERNEL_DS), %ecx
15634 movl %ecx, %ds
15635 movl %ecx, %es
15636+
15637+ pax_enter_kernel
15638+
15639 TRACE_IRQS_OFF
15640 movl %esp,%eax # pt_regs pointer
15641 call *%edi
15642 jmp ret_from_exception
15643 CFI_ENDPROC
15644-END(page_fault)
15645+ENDPROC(page_fault)
15646
15647 /*
15648 * Debug traps and NMI can happen at the one SYSENTER instruction
15649@@ -1303,7 +1553,7 @@ debug_stack_correct:
15650 call do_debug
15651 jmp ret_from_exception
15652 CFI_ENDPROC
15653-END(debug)
15654+ENDPROC(debug)
15655
15656 /*
15657 * NMI is doubly nasty. It can happen _while_ we're handling
15658@@ -1340,6 +1590,9 @@ nmi_stack_correct:
15659 xorl %edx,%edx # zero error code
15660 movl %esp,%eax # pt_regs pointer
15661 call do_nmi
15662+
15663+ pax_exit_kernel
15664+
15665 jmp restore_all_notrace
15666 CFI_ENDPROC
15667
15668@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15669 FIXUP_ESPFIX_STACK # %eax == %esp
15670 xorl %edx,%edx # zero error code
15671 call do_nmi
15672+
15673+ pax_exit_kernel
15674+
15675 RESTORE_REGS
15676 lss 12+4(%esp), %esp # back to espfix stack
15677 CFI_ADJUST_CFA_OFFSET -24
15678 jmp irq_return
15679 CFI_ENDPROC
15680-END(nmi)
15681+ENDPROC(nmi)
15682
15683 ENTRY(int3)
15684 RING0_INT_FRAME
15685@@ -1393,14 +1649,14 @@ ENTRY(int3)
15686 call do_int3
15687 jmp ret_from_exception
15688 CFI_ENDPROC
15689-END(int3)
15690+ENDPROC(int3)
15691
15692 ENTRY(general_protection)
15693 RING0_EC_FRAME
15694 pushl_cfi $do_general_protection
15695 jmp error_code
15696 CFI_ENDPROC
15697-END(general_protection)
15698+ENDPROC(general_protection)
15699
15700 #ifdef CONFIG_KVM_GUEST
15701 ENTRY(async_page_fault)
15702@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15703 pushl_cfi $do_async_page_fault
15704 jmp error_code
15705 CFI_ENDPROC
15706-END(async_page_fault)
15707+ENDPROC(async_page_fault)
15708 #endif
15709
15710 /*
15711diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15712index cdc79b5..4710a75 100644
15713--- a/arch/x86/kernel/entry_64.S
15714+++ b/arch/x86/kernel/entry_64.S
15715@@ -56,6 +56,8 @@
15716 #include <asm/ftrace.h>
15717 #include <asm/percpu.h>
15718 #include <linux/err.h>
15719+#include <asm/pgtable.h>
15720+#include <asm/alternative-asm.h>
15721
15722 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15723 #include <linux/elf-em.h>
15724@@ -69,8 +71,9 @@
15725 #ifdef CONFIG_FUNCTION_TRACER
15726 #ifdef CONFIG_DYNAMIC_FTRACE
15727 ENTRY(mcount)
15728+ pax_force_retaddr
15729 retq
15730-END(mcount)
15731+ENDPROC(mcount)
15732
15733 ENTRY(ftrace_caller)
15734 cmpl $0, function_trace_stop
15735@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15736 #endif
15737
15738 GLOBAL(ftrace_stub)
15739+ pax_force_retaddr
15740 retq
15741-END(ftrace_caller)
15742+ENDPROC(ftrace_caller)
15743
15744 #else /* ! CONFIG_DYNAMIC_FTRACE */
15745 ENTRY(mcount)
15746@@ -113,6 +117,7 @@ ENTRY(mcount)
15747 #endif
15748
15749 GLOBAL(ftrace_stub)
15750+ pax_force_retaddr
15751 retq
15752
15753 trace:
15754@@ -122,12 +127,13 @@ trace:
15755 movq 8(%rbp), %rsi
15756 subq $MCOUNT_INSN_SIZE, %rdi
15757
15758+ pax_force_fptr ftrace_trace_function
15759 call *ftrace_trace_function
15760
15761 MCOUNT_RESTORE_FRAME
15762
15763 jmp ftrace_stub
15764-END(mcount)
15765+ENDPROC(mcount)
15766 #endif /* CONFIG_DYNAMIC_FTRACE */
15767 #endif /* CONFIG_FUNCTION_TRACER */
15768
15769@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15770
15771 MCOUNT_RESTORE_FRAME
15772
15773+ pax_force_retaddr
15774 retq
15775-END(ftrace_graph_caller)
15776+ENDPROC(ftrace_graph_caller)
15777
15778 GLOBAL(return_to_handler)
15779 subq $24, %rsp
15780@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15781 movq 8(%rsp), %rdx
15782 movq (%rsp), %rax
15783 addq $24, %rsp
15784+ pax_force_fptr %rdi
15785 jmp *%rdi
15786 #endif
15787
15788@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15789 ENDPROC(native_usergs_sysret64)
15790 #endif /* CONFIG_PARAVIRT */
15791
15792+ .macro ljmpq sel, off
15793+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15794+ .byte 0x48; ljmp *1234f(%rip)
15795+ .pushsection .rodata
15796+ .align 16
15797+ 1234: .quad \off; .word \sel
15798+ .popsection
15799+#else
15800+ pushq $\sel
15801+ pushq $\off
15802+ lretq
15803+#endif
15804+ .endm
15805+
15806+ .macro pax_enter_kernel
15807+ pax_set_fptr_mask
15808+#ifdef CONFIG_PAX_KERNEXEC
15809+ call pax_enter_kernel
15810+#endif
15811+ .endm
15812+
15813+ .macro pax_exit_kernel
15814+#ifdef CONFIG_PAX_KERNEXEC
15815+ call pax_exit_kernel
15816+#endif
15817+ .endm
15818+
15819+#ifdef CONFIG_PAX_KERNEXEC
15820+ENTRY(pax_enter_kernel)
15821+ pushq %rdi
15822+
15823+#ifdef CONFIG_PARAVIRT
15824+ PV_SAVE_REGS(CLBR_RDI)
15825+#endif
15826+
15827+ GET_CR0_INTO_RDI
15828+ bts $16,%rdi
15829+ jnc 3f
15830+ mov %cs,%edi
15831+ cmp $__KERNEL_CS,%edi
15832+ jnz 2f
15833+1:
15834+
15835+#ifdef CONFIG_PARAVIRT
15836+ PV_RESTORE_REGS(CLBR_RDI)
15837+#endif
15838+
15839+ popq %rdi
15840+ pax_force_retaddr
15841+ retq
15842+
15843+2: ljmpq __KERNEL_CS,1f
15844+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15845+4: SET_RDI_INTO_CR0
15846+ jmp 1b
15847+ENDPROC(pax_enter_kernel)
15848+
15849+ENTRY(pax_exit_kernel)
15850+ pushq %rdi
15851+
15852+#ifdef CONFIG_PARAVIRT
15853+ PV_SAVE_REGS(CLBR_RDI)
15854+#endif
15855+
15856+ mov %cs,%rdi
15857+ cmp $__KERNEXEC_KERNEL_CS,%edi
15858+ jz 2f
15859+1:
15860+
15861+#ifdef CONFIG_PARAVIRT
15862+ PV_RESTORE_REGS(CLBR_RDI);
15863+#endif
15864+
15865+ popq %rdi
15866+ pax_force_retaddr
15867+ retq
15868+
15869+2: GET_CR0_INTO_RDI
15870+ btr $16,%rdi
15871+ ljmpq __KERNEL_CS,3f
15872+3: SET_RDI_INTO_CR0
15873+ jmp 1b
15874+#ifdef CONFIG_PARAVIRT
15875+ PV_RESTORE_REGS(CLBR_RDI);
15876+#endif
15877+
15878+ popq %rdi
15879+ pax_force_retaddr
15880+ retq
15881+ENDPROC(pax_exit_kernel)
15882+#endif
15883+
15884+ .macro pax_enter_kernel_user
15885+ pax_set_fptr_mask
15886+#ifdef CONFIG_PAX_MEMORY_UDEREF
15887+ call pax_enter_kernel_user
15888+#endif
15889+ .endm
15890+
15891+ .macro pax_exit_kernel_user
15892+#ifdef CONFIG_PAX_MEMORY_UDEREF
15893+ call pax_exit_kernel_user
15894+#endif
15895+#ifdef CONFIG_PAX_RANDKSTACK
15896+ pushq %rax
15897+ call pax_randomize_kstack
15898+ popq %rax
15899+#endif
15900+ .endm
15901+
15902+#ifdef CONFIG_PAX_MEMORY_UDEREF
15903+ENTRY(pax_enter_kernel_user)
15904+ pushq %rdi
15905+ pushq %rbx
15906+
15907+#ifdef CONFIG_PARAVIRT
15908+ PV_SAVE_REGS(CLBR_RDI)
15909+#endif
15910+
15911+ GET_CR3_INTO_RDI
15912+ mov %rdi,%rbx
15913+ add $__START_KERNEL_map,%rbx
15914+ sub phys_base(%rip),%rbx
15915+
15916+#ifdef CONFIG_PARAVIRT
15917+ pushq %rdi
15918+ cmpl $0, pv_info+PARAVIRT_enabled
15919+ jz 1f
15920+ i = 0
15921+ .rept USER_PGD_PTRS
15922+ mov i*8(%rbx),%rsi
15923+ mov $0,%sil
15924+ lea i*8(%rbx),%rdi
15925+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15926+ i = i + 1
15927+ .endr
15928+ jmp 2f
15929+1:
15930+#endif
15931+
15932+ i = 0
15933+ .rept USER_PGD_PTRS
15934+ movb $0,i*8(%rbx)
15935+ i = i + 1
15936+ .endr
15937+
15938+#ifdef CONFIG_PARAVIRT
15939+2: popq %rdi
15940+#endif
15941+ SET_RDI_INTO_CR3
15942+
15943+#ifdef CONFIG_PAX_KERNEXEC
15944+ GET_CR0_INTO_RDI
15945+ bts $16,%rdi
15946+ SET_RDI_INTO_CR0
15947+#endif
15948+
15949+#ifdef CONFIG_PARAVIRT
15950+ PV_RESTORE_REGS(CLBR_RDI)
15951+#endif
15952+
15953+ popq %rbx
15954+ popq %rdi
15955+ pax_force_retaddr
15956+ retq
15957+ENDPROC(pax_enter_kernel_user)
15958+
15959+ENTRY(pax_exit_kernel_user)
15960+ push %rdi
15961+
15962+#ifdef CONFIG_PARAVIRT
15963+ pushq %rbx
15964+ PV_SAVE_REGS(CLBR_RDI)
15965+#endif
15966+
15967+#ifdef CONFIG_PAX_KERNEXEC
15968+ GET_CR0_INTO_RDI
15969+ btr $16,%rdi
15970+ SET_RDI_INTO_CR0
15971+#endif
15972+
15973+ GET_CR3_INTO_RDI
15974+ add $__START_KERNEL_map,%rdi
15975+ sub phys_base(%rip),%rdi
15976+
15977+#ifdef CONFIG_PARAVIRT
15978+ cmpl $0, pv_info+PARAVIRT_enabled
15979+ jz 1f
15980+ mov %rdi,%rbx
15981+ i = 0
15982+ .rept USER_PGD_PTRS
15983+ mov i*8(%rbx),%rsi
15984+ mov $0x67,%sil
15985+ lea i*8(%rbx),%rdi
15986+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15987+ i = i + 1
15988+ .endr
15989+ jmp 2f
15990+1:
15991+#endif
15992+
15993+ i = 0
15994+ .rept USER_PGD_PTRS
15995+ movb $0x67,i*8(%rdi)
15996+ i = i + 1
15997+ .endr
15998+
15999+#ifdef CONFIG_PARAVIRT
16000+2: PV_RESTORE_REGS(CLBR_RDI)
16001+ popq %rbx
16002+#endif
16003+
16004+ popq %rdi
16005+ pax_force_retaddr
16006+ retq
16007+ENDPROC(pax_exit_kernel_user)
16008+#endif
16009+
16010+.macro pax_erase_kstack
16011+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16012+ call pax_erase_kstack
16013+#endif
16014+.endm
16015+
16016+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16017+/*
16018+ * r11: thread_info
16019+ * rcx, rdx: can be clobbered
16020+ */
16021+ENTRY(pax_erase_kstack)
16022+ pushq %rdi
16023+ pushq %rax
16024+ pushq %r11
16025+
16026+ GET_THREAD_INFO(%r11)
16027+ mov TI_lowest_stack(%r11), %rdi
16028+ mov $-0xBEEF, %rax
16029+ std
16030+
16031+1: mov %edi, %ecx
16032+ and $THREAD_SIZE_asm - 1, %ecx
16033+ shr $3, %ecx
16034+ repne scasq
16035+ jecxz 2f
16036+
16037+ cmp $2*8, %ecx
16038+ jc 2f
16039+
16040+ mov $2*8, %ecx
16041+ repe scasq
16042+ jecxz 2f
16043+ jne 1b
16044+
16045+2: cld
16046+ mov %esp, %ecx
16047+ sub %edi, %ecx
16048+
16049+ cmp $THREAD_SIZE_asm, %rcx
16050+ jb 3f
16051+ ud2
16052+3:
16053+
16054+ shr $3, %ecx
16055+ rep stosq
16056+
16057+ mov TI_task_thread_sp0(%r11), %rdi
16058+ sub $256, %rdi
16059+ mov %rdi, TI_lowest_stack(%r11)
16060+
16061+ popq %r11
16062+ popq %rax
16063+ popq %rdi
16064+ pax_force_retaddr
16065+ ret
16066+ENDPROC(pax_erase_kstack)
16067+#endif
16068
16069 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16070 #ifdef CONFIG_TRACE_IRQFLAGS
16071@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16072 .endm
16073
16074 .macro UNFAKE_STACK_FRAME
16075- addq $8*6, %rsp
16076- CFI_ADJUST_CFA_OFFSET -(6*8)
16077+ addq $8*6 + ARG_SKIP, %rsp
16078+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16079 .endm
16080
16081 /*
16082@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16083 movq %rsp, %rsi
16084
16085 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16086- testl $3, CS-RBP(%rsi)
16087+ testb $3, CS-RBP(%rsi)
16088 je 1f
16089 SWAPGS
16090 /*
16091@@ -355,9 +639,10 @@ ENTRY(save_rest)
16092 movq_cfi r15, R15+16
16093 movq %r11, 8(%rsp) /* return address */
16094 FIXUP_TOP_OF_STACK %r11, 16
16095+ pax_force_retaddr
16096 ret
16097 CFI_ENDPROC
16098-END(save_rest)
16099+ENDPROC(save_rest)
16100
16101 /* save complete stack frame */
16102 .pushsection .kprobes.text, "ax"
16103@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16104 js 1f /* negative -> in kernel */
16105 SWAPGS
16106 xorl %ebx,%ebx
16107-1: ret
16108+1: pax_force_retaddr_bts
16109+ ret
16110 CFI_ENDPROC
16111-END(save_paranoid)
16112+ENDPROC(save_paranoid)
16113 .popsection
16114
16115 /*
16116@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16117
16118 RESTORE_REST
16119
16120- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16121+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16122 jz retint_restore_args
16123
16124 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16125@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16126 jmp ret_from_sys_call # go to the SYSRET fastpath
16127
16128 CFI_ENDPROC
16129-END(ret_from_fork)
16130+ENDPROC(ret_from_fork)
16131
16132 /*
16133 * System call entry. Up to 6 arguments in registers are supported.
16134@@ -456,7 +742,7 @@ END(ret_from_fork)
16135 ENTRY(system_call)
16136 CFI_STARTPROC simple
16137 CFI_SIGNAL_FRAME
16138- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16139+ CFI_DEF_CFA rsp,0
16140 CFI_REGISTER rip,rcx
16141 /*CFI_REGISTER rflags,r11*/
16142 SWAPGS_UNSAFE_STACK
16143@@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16144
16145 movq %rsp,PER_CPU_VAR(old_rsp)
16146 movq PER_CPU_VAR(kernel_stack),%rsp
16147+ SAVE_ARGS 8*6,0
16148+ pax_enter_kernel_user
16149 /*
16150 * No need to follow this irqs off/on section - it's straight
16151 * and short:
16152 */
16153 ENABLE_INTERRUPTS(CLBR_NONE)
16154- SAVE_ARGS 8,0
16155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16156 movq %rcx,RIP-ARGOFFSET(%rsp)
16157 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16158- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16159+ GET_THREAD_INFO(%rcx)
16160+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16161 jnz tracesys
16162 system_call_fastpath:
16163 #if __SYSCALL_MASK == ~0
16164@@ -488,7 +776,7 @@ system_call_fastpath:
16165 cmpl $__NR_syscall_max,%eax
16166 #endif
16167 ja badsys
16168- movq %r10,%rcx
16169+ movq R10-ARGOFFSET(%rsp),%rcx
16170 call *sys_call_table(,%rax,8) # XXX: rip relative
16171 movq %rax,RAX-ARGOFFSET(%rsp)
16172 /*
16173@@ -502,10 +790,13 @@ sysret_check:
16174 LOCKDEP_SYS_EXIT
16175 DISABLE_INTERRUPTS(CLBR_NONE)
16176 TRACE_IRQS_OFF
16177- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16178+ GET_THREAD_INFO(%rcx)
16179+ movl TI_flags(%rcx),%edx
16180 andl %edi,%edx
16181 jnz sysret_careful
16182 CFI_REMEMBER_STATE
16183+ pax_exit_kernel_user
16184+ pax_erase_kstack
16185 /*
16186 * sysretq will re-enable interrupts:
16187 */
16188@@ -557,14 +848,18 @@ badsys:
16189 * jump back to the normal fast path.
16190 */
16191 auditsys:
16192- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16193+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16194 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16195 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16196 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16197 movq %rax,%rsi /* 2nd arg: syscall number */
16198 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16199 call __audit_syscall_entry
16200+
16201+ pax_erase_kstack
16202+
16203 LOAD_ARGS 0 /* reload call-clobbered registers */
16204+ pax_set_fptr_mask
16205 jmp system_call_fastpath
16206
16207 /*
16208@@ -585,7 +880,7 @@ sysret_audit:
16209 /* Do syscall tracing */
16210 tracesys:
16211 #ifdef CONFIG_AUDITSYSCALL
16212- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16213+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16214 jz auditsys
16215 #endif
16216 SAVE_REST
16217@@ -593,12 +888,16 @@ tracesys:
16218 FIXUP_TOP_OF_STACK %rdi
16219 movq %rsp,%rdi
16220 call syscall_trace_enter
16221+
16222+ pax_erase_kstack
16223+
16224 /*
16225 * Reload arg registers from stack in case ptrace changed them.
16226 * We don't reload %rax because syscall_trace_enter() returned
16227 * the value it wants us to use in the table lookup.
16228 */
16229 LOAD_ARGS ARGOFFSET, 1
16230+ pax_set_fptr_mask
16231 RESTORE_REST
16232 #if __SYSCALL_MASK == ~0
16233 cmpq $__NR_syscall_max,%rax
16234@@ -607,7 +906,7 @@ tracesys:
16235 cmpl $__NR_syscall_max,%eax
16236 #endif
16237 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16238- movq %r10,%rcx /* fixup for C */
16239+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16240 call *sys_call_table(,%rax,8)
16241 movq %rax,RAX-ARGOFFSET(%rsp)
16242 /* Use IRET because user could have changed frame */
16243@@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16244 andl %edi,%edx
16245 jnz int_careful
16246 andl $~TS_COMPAT,TI_status(%rcx)
16247+ pax_erase_kstack
16248 jmp retint_swapgs
16249
16250 /* Either reschedule or signal or syscall exit tracking needed. */
16251@@ -674,7 +974,7 @@ int_restore_rest:
16252 TRACE_IRQS_OFF
16253 jmp int_with_check
16254 CFI_ENDPROC
16255-END(system_call)
16256+ENDPROC(system_call)
16257
16258 /*
16259 * Certain special system calls that need to save a complete full stack frame.
16260@@ -690,7 +990,7 @@ ENTRY(\label)
16261 call \func
16262 jmp ptregscall_common
16263 CFI_ENDPROC
16264-END(\label)
16265+ENDPROC(\label)
16266 .endm
16267
16268 PTREGSCALL stub_clone, sys_clone, %r8
16269@@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16270 movq_cfi_restore R12+8, r12
16271 movq_cfi_restore RBP+8, rbp
16272 movq_cfi_restore RBX+8, rbx
16273+ pax_force_retaddr
16274 ret $REST_SKIP /* pop extended registers */
16275 CFI_ENDPROC
16276-END(ptregscall_common)
16277+ENDPROC(ptregscall_common)
16278
16279 ENTRY(stub_execve)
16280 CFI_STARTPROC
16281@@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16282 RESTORE_REST
16283 jmp int_ret_from_sys_call
16284 CFI_ENDPROC
16285-END(stub_execve)
16286+ENDPROC(stub_execve)
16287
16288 /*
16289 * sigreturn is special because it needs to restore all registers on return.
16290@@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16291 RESTORE_REST
16292 jmp int_ret_from_sys_call
16293 CFI_ENDPROC
16294-END(stub_rt_sigreturn)
16295+ENDPROC(stub_rt_sigreturn)
16296
16297 #ifdef CONFIG_X86_X32_ABI
16298 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16299@@ -812,7 +1113,7 @@ vector=vector+1
16300 2: jmp common_interrupt
16301 .endr
16302 CFI_ENDPROC
16303-END(irq_entries_start)
16304+ENDPROC(irq_entries_start)
16305
16306 .previous
16307 END(interrupt)
16308@@ -832,6 +1133,16 @@ END(interrupt)
16309 subq $ORIG_RAX-RBP, %rsp
16310 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16311 SAVE_ARGS_IRQ
16312+#ifdef CONFIG_PAX_MEMORY_UDEREF
16313+ testb $3, CS(%rdi)
16314+ jnz 1f
16315+ pax_enter_kernel
16316+ jmp 2f
16317+1: pax_enter_kernel_user
16318+2:
16319+#else
16320+ pax_enter_kernel
16321+#endif
16322 call \func
16323 .endm
16324
16325@@ -863,7 +1174,7 @@ ret_from_intr:
16326
16327 exit_intr:
16328 GET_THREAD_INFO(%rcx)
16329- testl $3,CS-ARGOFFSET(%rsp)
16330+ testb $3,CS-ARGOFFSET(%rsp)
16331 je retint_kernel
16332
16333 /* Interrupt came from user space */
16334@@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16335 * The iretq could re-enable interrupts:
16336 */
16337 DISABLE_INTERRUPTS(CLBR_ANY)
16338+ pax_exit_kernel_user
16339 TRACE_IRQS_IRETQ
16340 SWAPGS
16341 jmp restore_args
16342
16343 retint_restore_args: /* return to kernel space */
16344 DISABLE_INTERRUPTS(CLBR_ANY)
16345+ pax_exit_kernel
16346+ pax_force_retaddr RIP-ARGOFFSET
16347 /*
16348 * The iretq could re-enable interrupts:
16349 */
16350@@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16351 #endif
16352
16353 CFI_ENDPROC
16354-END(common_interrupt)
16355+ENDPROC(common_interrupt)
16356 /*
16357 * End of kprobes section
16358 */
16359@@ -996,7 +1310,7 @@ ENTRY(\sym)
16360 interrupt \do_sym
16361 jmp ret_from_intr
16362 CFI_ENDPROC
16363-END(\sym)
16364+ENDPROC(\sym)
16365 .endm
16366
16367 #ifdef CONFIG_SMP
16368@@ -1069,12 +1383,22 @@ ENTRY(\sym)
16369 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16370 call error_entry
16371 DEFAULT_FRAME 0
16372+#ifdef CONFIG_PAX_MEMORY_UDEREF
16373+ testb $3, CS(%rsp)
16374+ jnz 1f
16375+ pax_enter_kernel
16376+ jmp 2f
16377+1: pax_enter_kernel_user
16378+2:
16379+#else
16380+ pax_enter_kernel
16381+#endif
16382 movq %rsp,%rdi /* pt_regs pointer */
16383 xorl %esi,%esi /* no error code */
16384 call \do_sym
16385 jmp error_exit /* %ebx: no swapgs flag */
16386 CFI_ENDPROC
16387-END(\sym)
16388+ENDPROC(\sym)
16389 .endm
16390
16391 .macro paranoidzeroentry sym do_sym
16392@@ -1086,15 +1410,25 @@ ENTRY(\sym)
16393 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16394 call save_paranoid
16395 TRACE_IRQS_OFF
16396+#ifdef CONFIG_PAX_MEMORY_UDEREF
16397+ testb $3, CS(%rsp)
16398+ jnz 1f
16399+ pax_enter_kernel
16400+ jmp 2f
16401+1: pax_enter_kernel_user
16402+2:
16403+#else
16404+ pax_enter_kernel
16405+#endif
16406 movq %rsp,%rdi /* pt_regs pointer */
16407 xorl %esi,%esi /* no error code */
16408 call \do_sym
16409 jmp paranoid_exit /* %ebx: no swapgs flag */
16410 CFI_ENDPROC
16411-END(\sym)
16412+ENDPROC(\sym)
16413 .endm
16414
16415-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16416+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16417 .macro paranoidzeroentry_ist sym do_sym ist
16418 ENTRY(\sym)
16419 INTR_FRAME
16420@@ -1104,14 +1438,30 @@ ENTRY(\sym)
16421 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16422 call save_paranoid
16423 TRACE_IRQS_OFF
16424+#ifdef CONFIG_PAX_MEMORY_UDEREF
16425+ testb $3, CS(%rsp)
16426+ jnz 1f
16427+ pax_enter_kernel
16428+ jmp 2f
16429+1: pax_enter_kernel_user
16430+2:
16431+#else
16432+ pax_enter_kernel
16433+#endif
16434 movq %rsp,%rdi /* pt_regs pointer */
16435 xorl %esi,%esi /* no error code */
16436+#ifdef CONFIG_SMP
16437+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16438+ lea init_tss(%r12), %r12
16439+#else
16440+ lea init_tss(%rip), %r12
16441+#endif
16442 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16443 call \do_sym
16444 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16445 jmp paranoid_exit /* %ebx: no swapgs flag */
16446 CFI_ENDPROC
16447-END(\sym)
16448+ENDPROC(\sym)
16449 .endm
16450
16451 .macro errorentry sym do_sym
16452@@ -1122,13 +1472,23 @@ ENTRY(\sym)
16453 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16454 call error_entry
16455 DEFAULT_FRAME 0
16456+#ifdef CONFIG_PAX_MEMORY_UDEREF
16457+ testb $3, CS(%rsp)
16458+ jnz 1f
16459+ pax_enter_kernel
16460+ jmp 2f
16461+1: pax_enter_kernel_user
16462+2:
16463+#else
16464+ pax_enter_kernel
16465+#endif
16466 movq %rsp,%rdi /* pt_regs pointer */
16467 movq ORIG_RAX(%rsp),%rsi /* get error code */
16468 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16469 call \do_sym
16470 jmp error_exit /* %ebx: no swapgs flag */
16471 CFI_ENDPROC
16472-END(\sym)
16473+ENDPROC(\sym)
16474 .endm
16475
16476 /* error code is on the stack already */
16477@@ -1141,13 +1501,23 @@ ENTRY(\sym)
16478 call save_paranoid
16479 DEFAULT_FRAME 0
16480 TRACE_IRQS_OFF
16481+#ifdef CONFIG_PAX_MEMORY_UDEREF
16482+ testb $3, CS(%rsp)
16483+ jnz 1f
16484+ pax_enter_kernel
16485+ jmp 2f
16486+1: pax_enter_kernel_user
16487+2:
16488+#else
16489+ pax_enter_kernel
16490+#endif
16491 movq %rsp,%rdi /* pt_regs pointer */
16492 movq ORIG_RAX(%rsp),%rsi /* get error code */
16493 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16494 call \do_sym
16495 jmp paranoid_exit /* %ebx: no swapgs flag */
16496 CFI_ENDPROC
16497-END(\sym)
16498+ENDPROC(\sym)
16499 .endm
16500
16501 zeroentry divide_error do_divide_error
16502@@ -1177,9 +1547,10 @@ gs_change:
16503 2: mfence /* workaround */
16504 SWAPGS
16505 popfq_cfi
16506+ pax_force_retaddr
16507 ret
16508 CFI_ENDPROC
16509-END(native_load_gs_index)
16510+ENDPROC(native_load_gs_index)
16511
16512 .section __ex_table,"a"
16513 .align 8
16514@@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16515 * Here we are in the child and the registers are set as they were
16516 * at kernel_thread() invocation in the parent.
16517 */
16518+ pax_force_fptr %rsi
16519 call *%rsi
16520 # exit
16521 mov %eax, %edi
16522 call do_exit
16523 ud2 # padding for call trace
16524 CFI_ENDPROC
16525-END(kernel_thread_helper)
16526+ENDPROC(kernel_thread_helper)
16527
16528 /*
16529 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16530@@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16531 RESTORE_REST
16532 testq %rax,%rax
16533 je int_ret_from_sys_call
16534- RESTORE_ARGS
16535 UNFAKE_STACK_FRAME
16536+ pax_force_retaddr
16537 ret
16538 CFI_ENDPROC
16539-END(kernel_execve)
16540+ENDPROC(kernel_execve)
16541
16542 /* Call softirq on interrupt stack. Interrupts are off. */
16543 ENTRY(call_softirq)
16544@@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16545 CFI_DEF_CFA_REGISTER rsp
16546 CFI_ADJUST_CFA_OFFSET -8
16547 decl PER_CPU_VAR(irq_count)
16548+ pax_force_retaddr
16549 ret
16550 CFI_ENDPROC
16551-END(call_softirq)
16552+ENDPROC(call_softirq)
16553
16554 #ifdef CONFIG_XEN
16555 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16556@@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16557 decl PER_CPU_VAR(irq_count)
16558 jmp error_exit
16559 CFI_ENDPROC
16560-END(xen_do_hypervisor_callback)
16561+ENDPROC(xen_do_hypervisor_callback)
16562
16563 /*
16564 * Hypervisor uses this for application faults while it executes.
16565@@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16566 SAVE_ALL
16567 jmp error_exit
16568 CFI_ENDPROC
16569-END(xen_failsafe_callback)
16570+ENDPROC(xen_failsafe_callback)
16571
16572 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16573 xen_hvm_callback_vector xen_evtchn_do_upcall
16574@@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16575 TRACE_IRQS_OFF
16576 testl %ebx,%ebx /* swapgs needed? */
16577 jnz paranoid_restore
16578- testl $3,CS(%rsp)
16579+ testb $3,CS(%rsp)
16580 jnz paranoid_userspace
16581+#ifdef CONFIG_PAX_MEMORY_UDEREF
16582+ pax_exit_kernel
16583+ TRACE_IRQS_IRETQ 0
16584+ SWAPGS_UNSAFE_STACK
16585+ RESTORE_ALL 8
16586+ pax_force_retaddr_bts
16587+ jmp irq_return
16588+#endif
16589 paranoid_swapgs:
16590+#ifdef CONFIG_PAX_MEMORY_UDEREF
16591+ pax_exit_kernel_user
16592+#else
16593+ pax_exit_kernel
16594+#endif
16595 TRACE_IRQS_IRETQ 0
16596 SWAPGS_UNSAFE_STACK
16597 RESTORE_ALL 8
16598 jmp irq_return
16599 paranoid_restore:
16600+ pax_exit_kernel
16601 TRACE_IRQS_IRETQ 0
16602 RESTORE_ALL 8
16603+ pax_force_retaddr_bts
16604 jmp irq_return
16605 paranoid_userspace:
16606 GET_THREAD_INFO(%rcx)
16607@@ -1442,7 +1830,7 @@ paranoid_schedule:
16608 TRACE_IRQS_OFF
16609 jmp paranoid_userspace
16610 CFI_ENDPROC
16611-END(paranoid_exit)
16612+ENDPROC(paranoid_exit)
16613
16614 /*
16615 * Exception entry point. This expects an error code/orig_rax on the stack.
16616@@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16617 movq_cfi r14, R14+8
16618 movq_cfi r15, R15+8
16619 xorl %ebx,%ebx
16620- testl $3,CS+8(%rsp)
16621+ testb $3,CS+8(%rsp)
16622 je error_kernelspace
16623 error_swapgs:
16624 SWAPGS
16625 error_sti:
16626 TRACE_IRQS_OFF
16627+ pax_force_retaddr_bts
16628 ret
16629
16630 /*
16631@@ -1501,7 +1890,7 @@ bstep_iret:
16632 movq %rcx,RIP+8(%rsp)
16633 jmp error_swapgs
16634 CFI_ENDPROC
16635-END(error_entry)
16636+ENDPROC(error_entry)
16637
16638
16639 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16640@@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16641 jnz retint_careful
16642 jmp retint_swapgs
16643 CFI_ENDPROC
16644-END(error_exit)
16645+ENDPROC(error_exit)
16646
16647 /*
16648 * Test if a given stack is an NMI stack or not.
16649@@ -1579,9 +1968,11 @@ ENTRY(nmi)
16650 * If %cs was not the kernel segment, then the NMI triggered in user
16651 * space, which means it is definitely not nested.
16652 */
16653+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16654+ je 1f
16655 cmpl $__KERNEL_CS, 16(%rsp)
16656 jne first_nmi
16657-
16658+1:
16659 /*
16660 * Check the special variable on the stack to see if NMIs are
16661 * executing.
16662@@ -1728,6 +2119,16 @@ end_repeat_nmi:
16663 */
16664 call save_paranoid
16665 DEFAULT_FRAME 0
16666+#ifdef CONFIG_PAX_MEMORY_UDEREF
16667+ testb $3, CS(%rsp)
16668+ jnz 1f
16669+ pax_enter_kernel
16670+ jmp 2f
16671+1: pax_enter_kernel_user
16672+2:
16673+#else
16674+ pax_enter_kernel
16675+#endif
16676 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16677 movq %rsp,%rdi
16678 movq $-1,%rsi
16679@@ -1735,21 +2136,32 @@ end_repeat_nmi:
16680 testl %ebx,%ebx /* swapgs needed? */
16681 jnz nmi_restore
16682 nmi_swapgs:
16683+#ifdef CONFIG_PAX_MEMORY_UDEREF
16684+ pax_exit_kernel_user
16685+#else
16686+ pax_exit_kernel
16687+#endif
16688 SWAPGS_UNSAFE_STACK
16689+ RESTORE_ALL 8
16690+ /* Clear the NMI executing stack variable */
16691+ movq $0, 10*8(%rsp)
16692+ jmp irq_return
16693 nmi_restore:
16694+ pax_exit_kernel
16695 RESTORE_ALL 8
16696+ pax_force_retaddr_bts
16697 /* Clear the NMI executing stack variable */
16698 movq $0, 10*8(%rsp)
16699 jmp irq_return
16700 CFI_ENDPROC
16701-END(nmi)
16702+ENDPROC(nmi)
16703
16704 ENTRY(ignore_sysret)
16705 CFI_STARTPROC
16706 mov $-ENOSYS,%eax
16707 sysret
16708 CFI_ENDPROC
16709-END(ignore_sysret)
16710+ENDPROC(ignore_sysret)
16711
16712 /*
16713 * End of kprobes section
16714diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16715index c9a281f..ce2f317 100644
16716--- a/arch/x86/kernel/ftrace.c
16717+++ b/arch/x86/kernel/ftrace.c
16718@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16719 static const void *mod_code_newcode; /* holds the text to write to the IP */
16720
16721 static unsigned nmi_wait_count;
16722-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16723+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16724
16725 int ftrace_arch_read_dyn_info(char *buf, int size)
16726 {
16727@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16728
16729 r = snprintf(buf, size, "%u %u",
16730 nmi_wait_count,
16731- atomic_read(&nmi_update_count));
16732+ atomic_read_unchecked(&nmi_update_count));
16733 return r;
16734 }
16735
16736@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16737
16738 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16739 smp_rmb();
16740+ pax_open_kernel();
16741 ftrace_mod_code();
16742- atomic_inc(&nmi_update_count);
16743+ pax_close_kernel();
16744+ atomic_inc_unchecked(&nmi_update_count);
16745 }
16746 /* Must have previous changes seen before executions */
16747 smp_mb();
16748@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16749 {
16750 unsigned char replaced[MCOUNT_INSN_SIZE];
16751
16752+ ip = ktla_ktva(ip);
16753+
16754 /*
16755 * Note: Due to modules and __init, code can
16756 * disappear and change, we need to protect against faulting
16757@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16758 unsigned char old[MCOUNT_INSN_SIZE], *new;
16759 int ret;
16760
16761- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16762+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16763 new = ftrace_call_replace(ip, (unsigned long)func);
16764 ret = ftrace_modify_code(ip, old, new);
16765
16766@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16767 {
16768 unsigned char code[MCOUNT_INSN_SIZE];
16769
16770+ ip = ktla_ktva(ip);
16771+
16772 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16773 return -EFAULT;
16774
16775diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16776index 51ff186..9e77418 100644
16777--- a/arch/x86/kernel/head32.c
16778+++ b/arch/x86/kernel/head32.c
16779@@ -19,6 +19,7 @@
16780 #include <asm/io_apic.h>
16781 #include <asm/bios_ebda.h>
16782 #include <asm/tlbflush.h>
16783+#include <asm/boot.h>
16784
16785 static void __init i386_default_early_setup(void)
16786 {
16787@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16788
16789 void __init i386_start_kernel(void)
16790 {
16791- memblock_reserve(__pa_symbol(&_text),
16792- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16793+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16794
16795 #ifdef CONFIG_BLK_DEV_INITRD
16796 /* Reserve INITRD */
16797diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16798index ce0be7c..c41476e 100644
16799--- a/arch/x86/kernel/head_32.S
16800+++ b/arch/x86/kernel/head_32.S
16801@@ -25,6 +25,12 @@
16802 /* Physical address */
16803 #define pa(X) ((X) - __PAGE_OFFSET)
16804
16805+#ifdef CONFIG_PAX_KERNEXEC
16806+#define ta(X) (X)
16807+#else
16808+#define ta(X) ((X) - __PAGE_OFFSET)
16809+#endif
16810+
16811 /*
16812 * References to members of the new_cpu_data structure.
16813 */
16814@@ -54,11 +60,7 @@
16815 * and small than max_low_pfn, otherwise will waste some page table entries
16816 */
16817
16818-#if PTRS_PER_PMD > 1
16819-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16820-#else
16821-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16822-#endif
16823+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16824
16825 /* Number of possible pages in the lowmem region */
16826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16827@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16829
16830 /*
16831+ * Real beginning of normal "text" segment
16832+ */
16833+ENTRY(stext)
16834+ENTRY(_stext)
16835+
16836+/*
16837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16838 * %esi points to the real-mode code as a 32-bit pointer.
16839 * CS and DS must be 4 GB flat segments, but we don't depend on
16840@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16841 * can.
16842 */
16843 __HEAD
16844+
16845+#ifdef CONFIG_PAX_KERNEXEC
16846+ jmp startup_32
16847+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16848+.fill PAGE_SIZE-5,1,0xcc
16849+#endif
16850+
16851 ENTRY(startup_32)
16852 movl pa(stack_start),%ecx
16853
16854@@ -105,6 +120,57 @@ ENTRY(startup_32)
16855 2:
16856 leal -__PAGE_OFFSET(%ecx),%esp
16857
16858+#ifdef CONFIG_SMP
16859+ movl $pa(cpu_gdt_table),%edi
16860+ movl $__per_cpu_load,%eax
16861+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16862+ rorl $16,%eax
16863+ movb %al,__KERNEL_PERCPU + 4(%edi)
16864+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16865+ movl $__per_cpu_end - 1,%eax
16866+ subl $__per_cpu_start,%eax
16867+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16868+#endif
16869+
16870+#ifdef CONFIG_PAX_MEMORY_UDEREF
16871+ movl $NR_CPUS,%ecx
16872+ movl $pa(cpu_gdt_table),%edi
16873+1:
16874+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16875+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16876+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16877+ addl $PAGE_SIZE_asm,%edi
16878+ loop 1b
16879+#endif
16880+
16881+#ifdef CONFIG_PAX_KERNEXEC
16882+ movl $pa(boot_gdt),%edi
16883+ movl $__LOAD_PHYSICAL_ADDR,%eax
16884+ movw %ax,__BOOT_CS + 2(%edi)
16885+ rorl $16,%eax
16886+ movb %al,__BOOT_CS + 4(%edi)
16887+ movb %ah,__BOOT_CS + 7(%edi)
16888+ rorl $16,%eax
16889+
16890+ ljmp $(__BOOT_CS),$1f
16891+1:
16892+
16893+ movl $NR_CPUS,%ecx
16894+ movl $pa(cpu_gdt_table),%edi
16895+ addl $__PAGE_OFFSET,%eax
16896+1:
16897+ movw %ax,__KERNEL_CS + 2(%edi)
16898+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16899+ rorl $16,%eax
16900+ movb %al,__KERNEL_CS + 4(%edi)
16901+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16902+ movb %ah,__KERNEL_CS + 7(%edi)
16903+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16904+ rorl $16,%eax
16905+ addl $PAGE_SIZE_asm,%edi
16906+ loop 1b
16907+#endif
16908+
16909 /*
16910 * Clear BSS first so that there are no surprises...
16911 */
16912@@ -195,8 +261,11 @@ ENTRY(startup_32)
16913 movl %eax, pa(max_pfn_mapped)
16914
16915 /* Do early initialization of the fixmap area */
16916- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16917- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16918+#ifdef CONFIG_COMPAT_VDSO
16919+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16920+#else
16921+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16922+#endif
16923 #else /* Not PAE */
16924
16925 page_pde_offset = (__PAGE_OFFSET >> 20);
16926@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16927 movl %eax, pa(max_pfn_mapped)
16928
16929 /* Do early initialization of the fixmap area */
16930- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16931- movl %eax,pa(initial_page_table+0xffc)
16932+#ifdef CONFIG_COMPAT_VDSO
16933+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16934+#else
16935+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16936+#endif
16937 #endif
16938
16939 #ifdef CONFIG_PARAVIRT
16940@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16941 cmpl $num_subarch_entries, %eax
16942 jae bad_subarch
16943
16944- movl pa(subarch_entries)(,%eax,4), %eax
16945- subl $__PAGE_OFFSET, %eax
16946- jmp *%eax
16947+ jmp *pa(subarch_entries)(,%eax,4)
16948
16949 bad_subarch:
16950 WEAK(lguest_entry)
16951@@ -255,10 +325,10 @@ WEAK(xen_entry)
16952 __INITDATA
16953
16954 subarch_entries:
16955- .long default_entry /* normal x86/PC */
16956- .long lguest_entry /* lguest hypervisor */
16957- .long xen_entry /* Xen hypervisor */
16958- .long default_entry /* Moorestown MID */
16959+ .long ta(default_entry) /* normal x86/PC */
16960+ .long ta(lguest_entry) /* lguest hypervisor */
16961+ .long ta(xen_entry) /* Xen hypervisor */
16962+ .long ta(default_entry) /* Moorestown MID */
16963 num_subarch_entries = (. - subarch_entries) / 4
16964 .previous
16965 #else
16966@@ -312,6 +382,7 @@ default_entry:
16967 orl %edx,%eax
16968 movl %eax,%cr4
16969
16970+#ifdef CONFIG_X86_PAE
16971 testb $X86_CR4_PAE, %al # check if PAE is enabled
16972 jz 6f
16973
16974@@ -340,6 +411,9 @@ default_entry:
16975 /* Make changes effective */
16976 wrmsr
16977
16978+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16979+#endif
16980+
16981 6:
16982
16983 /*
16984@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16986 movl %eax,%ss # after changing gdt.
16987
16988- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16989+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16990 movl %eax,%ds
16991 movl %eax,%es
16992
16993@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16994 */
16995 cmpb $0,ready
16996 jne 1f
16997- movl $gdt_page,%eax
16998+ movl $cpu_gdt_table,%eax
16999 movl $stack_canary,%ecx
17000+#ifdef CONFIG_SMP
17001+ addl $__per_cpu_load,%ecx
17002+#endif
17003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17004 shrl $16, %ecx
17005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17007 1:
17008-#endif
17009 movl $(__KERNEL_STACK_CANARY),%eax
17010+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17011+ movl $(__USER_DS),%eax
17012+#else
17013+ xorl %eax,%eax
17014+#endif
17015 movl %eax,%gs
17016
17017 xorl %eax,%eax # Clear LDT
17018@@ -558,22 +639,22 @@ early_page_fault:
17019 jmp early_fault
17020
17021 early_fault:
17022- cld
17023 #ifdef CONFIG_PRINTK
17024+ cmpl $1,%ss:early_recursion_flag
17025+ je hlt_loop
17026+ incl %ss:early_recursion_flag
17027+ cld
17028 pusha
17029 movl $(__KERNEL_DS),%eax
17030 movl %eax,%ds
17031 movl %eax,%es
17032- cmpl $2,early_recursion_flag
17033- je hlt_loop
17034- incl early_recursion_flag
17035 movl %cr2,%eax
17036 pushl %eax
17037 pushl %edx /* trapno */
17038 pushl $fault_msg
17039 call printk
17040+; call dump_stack
17041 #endif
17042- call dump_stack
17043 hlt_loop:
17044 hlt
17045 jmp hlt_loop
17046@@ -581,8 +662,11 @@ hlt_loop:
17047 /* This is the default interrupt "handler" :-) */
17048 ALIGN
17049 ignore_int:
17050- cld
17051 #ifdef CONFIG_PRINTK
17052+ cmpl $2,%ss:early_recursion_flag
17053+ je hlt_loop
17054+ incl %ss:early_recursion_flag
17055+ cld
17056 pushl %eax
17057 pushl %ecx
17058 pushl %edx
17059@@ -591,9 +675,6 @@ ignore_int:
17060 movl $(__KERNEL_DS),%eax
17061 movl %eax,%ds
17062 movl %eax,%es
17063- cmpl $2,early_recursion_flag
17064- je hlt_loop
17065- incl early_recursion_flag
17066 pushl 16(%esp)
17067 pushl 24(%esp)
17068 pushl 32(%esp)
17069@@ -622,29 +703,43 @@ ENTRY(initial_code)
17070 /*
17071 * BSS section
17072 */
17073-__PAGE_ALIGNED_BSS
17074- .align PAGE_SIZE
17075 #ifdef CONFIG_X86_PAE
17076+.section .initial_pg_pmd,"a",@progbits
17077 initial_pg_pmd:
17078 .fill 1024*KPMDS,4,0
17079 #else
17080+.section .initial_page_table,"a",@progbits
17081 ENTRY(initial_page_table)
17082 .fill 1024,4,0
17083 #endif
17084+.section .initial_pg_fixmap,"a",@progbits
17085 initial_pg_fixmap:
17086 .fill 1024,4,0
17087+.section .empty_zero_page,"a",@progbits
17088 ENTRY(empty_zero_page)
17089 .fill 4096,1,0
17090+.section .swapper_pg_dir,"a",@progbits
17091 ENTRY(swapper_pg_dir)
17092+#ifdef CONFIG_X86_PAE
17093+ .fill 4,8,0
17094+#else
17095 .fill 1024,4,0
17096+#endif
17097+
17098+/*
17099+ * The IDT has to be page-aligned to simplify the Pentium
17100+ * F0 0F bug workaround.. We have a special link segment
17101+ * for this.
17102+ */
17103+.section .idt,"a",@progbits
17104+ENTRY(idt_table)
17105+ .fill 256,8,0
17106
17107 /*
17108 * This starts the data section.
17109 */
17110 #ifdef CONFIG_X86_PAE
17111-__PAGE_ALIGNED_DATA
17112- /* Page-aligned for the benefit of paravirt? */
17113- .align PAGE_SIZE
17114+.section .initial_page_table,"a",@progbits
17115 ENTRY(initial_page_table)
17116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17117 # if KPMDS == 3
17118@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17119 # error "Kernel PMDs should be 1, 2 or 3"
17120 # endif
17121 .align PAGE_SIZE /* needs to be page-sized too */
17122+
17123+#ifdef CONFIG_PAX_PER_CPU_PGD
17124+ENTRY(cpu_pgd)
17125+ .rept NR_CPUS
17126+ .fill 4,8,0
17127+ .endr
17128+#endif
17129+
17130 #endif
17131
17132 .data
17133 .balign 4
17134 ENTRY(stack_start)
17135- .long init_thread_union+THREAD_SIZE
17136+ .long init_thread_union+THREAD_SIZE-8
17137
17138+ready: .byte 0
17139+
17140+.section .rodata,"a",@progbits
17141 early_recursion_flag:
17142 .long 0
17143
17144-ready: .byte 0
17145-
17146 int_msg:
17147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17148
17149@@ -707,7 +811,7 @@ fault_msg:
17150 .word 0 # 32 bit align gdt_desc.address
17151 boot_gdt_descr:
17152 .word __BOOT_DS+7
17153- .long boot_gdt - __PAGE_OFFSET
17154+ .long pa(boot_gdt)
17155
17156 .word 0 # 32-bit align idt_desc.address
17157 idt_descr:
17158@@ -718,7 +822,7 @@ idt_descr:
17159 .word 0 # 32 bit align gdt_desc.address
17160 ENTRY(early_gdt_descr)
17161 .word GDT_ENTRIES*8-1
17162- .long gdt_page /* Overwritten for secondary CPUs */
17163+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17164
17165 /*
17166 * The boot_gdt must mirror the equivalent in setup.S and is
17167@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17168 .align L1_CACHE_BYTES
17169 ENTRY(boot_gdt)
17170 .fill GDT_ENTRY_BOOT_CS,8,0
17171- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17172- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17173+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17174+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17175+
17176+ .align PAGE_SIZE_asm
17177+ENTRY(cpu_gdt_table)
17178+ .rept NR_CPUS
17179+ .quad 0x0000000000000000 /* NULL descriptor */
17180+ .quad 0x0000000000000000 /* 0x0b reserved */
17181+ .quad 0x0000000000000000 /* 0x13 reserved */
17182+ .quad 0x0000000000000000 /* 0x1b reserved */
17183+
17184+#ifdef CONFIG_PAX_KERNEXEC
17185+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17186+#else
17187+ .quad 0x0000000000000000 /* 0x20 unused */
17188+#endif
17189+
17190+ .quad 0x0000000000000000 /* 0x28 unused */
17191+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17192+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17193+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17194+ .quad 0x0000000000000000 /* 0x4b reserved */
17195+ .quad 0x0000000000000000 /* 0x53 reserved */
17196+ .quad 0x0000000000000000 /* 0x5b reserved */
17197+
17198+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17199+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17200+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17201+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17202+
17203+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17204+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17205+
17206+ /*
17207+ * Segments used for calling PnP BIOS have byte granularity.
17208+ * The code segments and data segments have fixed 64k limits,
17209+ * the transfer segment sizes are set at run time.
17210+ */
17211+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17212+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17213+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17214+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17215+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17216+
17217+ /*
17218+ * The APM segments have byte granularity and their bases
17219+ * are set at run time. All have 64k limits.
17220+ */
17221+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17222+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17223+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17224+
17225+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17226+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17227+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17228+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17229+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17230+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17231+
17232+ /* Be sure this is zeroed to avoid false validations in Xen */
17233+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17234+ .endr
17235diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17236index 40f4eb3..6d24d9d 100644
17237--- a/arch/x86/kernel/head_64.S
17238+++ b/arch/x86/kernel/head_64.S
17239@@ -19,6 +19,8 @@
17240 #include <asm/cache.h>
17241 #include <asm/processor-flags.h>
17242 #include <asm/percpu.h>
17243+#include <asm/cpufeature.h>
17244+#include <asm/alternative-asm.h>
17245
17246 #ifdef CONFIG_PARAVIRT
17247 #include <asm/asm-offsets.h>
17248@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17252+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17253+L3_VMALLOC_START = pud_index(VMALLOC_START)
17254+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17255+L3_VMALLOC_END = pud_index(VMALLOC_END)
17256+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17257+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17258
17259 .text
17260 __HEAD
17261@@ -85,35 +93,23 @@ startup_64:
17262 */
17263 addq %rbp, init_level4_pgt + 0(%rip)
17264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17265+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17266+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17267+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17269
17270 addq %rbp, level3_ident_pgt + 0(%rip)
17271+#ifndef CONFIG_XEN
17272+ addq %rbp, level3_ident_pgt + 8(%rip)
17273+#endif
17274
17275- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17276- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17277+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17278+
17279+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17280+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17281
17282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17283-
17284- /* Add an Identity mapping if I am above 1G */
17285- leaq _text(%rip), %rdi
17286- andq $PMD_PAGE_MASK, %rdi
17287-
17288- movq %rdi, %rax
17289- shrq $PUD_SHIFT, %rax
17290- andq $(PTRS_PER_PUD - 1), %rax
17291- jz ident_complete
17292-
17293- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17294- leaq level3_ident_pgt(%rip), %rbx
17295- movq %rdx, 0(%rbx, %rax, 8)
17296-
17297- movq %rdi, %rax
17298- shrq $PMD_SHIFT, %rax
17299- andq $(PTRS_PER_PMD - 1), %rax
17300- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17301- leaq level2_spare_pgt(%rip), %rbx
17302- movq %rdx, 0(%rbx, %rax, 8)
17303-ident_complete:
17304+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17305
17306 /*
17307 * Fixup the kernel text+data virtual addresses. Note that
17308@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17309 * after the boot processor executes this code.
17310 */
17311
17312- /* Enable PAE mode and PGE */
17313- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17314+ /* Enable PAE mode and PSE/PGE */
17315+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17316 movq %rax, %cr4
17317
17318 /* Setup early boot stage 4 level pagetables. */
17319@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17320 movl $MSR_EFER, %ecx
17321 rdmsr
17322 btsl $_EFER_SCE, %eax /* Enable System Call */
17323- btl $20,%edi /* No Execute supported? */
17324+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17325 jnc 1f
17326 btsl $_EFER_NX, %eax
17327+ leaq init_level4_pgt(%rip), %rdi
17328+#ifndef CONFIG_EFI
17329+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17330+#endif
17331+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17332+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17333+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17334+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17335 1: wrmsr /* Make changes effective */
17336
17337 /* Setup cr0 */
17338@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17339 * jump. In addition we need to ensure %cs is set so we make this
17340 * a far return.
17341 */
17342+ pax_set_fptr_mask
17343 movq initial_code(%rip),%rax
17344 pushq $0 # fake return address to stop unwinder
17345 pushq $__KERNEL_CS # set correct cs
17346@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17347 bad_address:
17348 jmp bad_address
17349
17350- .section ".init.text","ax"
17351+ __INIT
17352 #ifdef CONFIG_EARLY_PRINTK
17353 .globl early_idt_handlers
17354 early_idt_handlers:
17355@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17356 #endif /* EARLY_PRINTK */
17357 1: hlt
17358 jmp 1b
17359+ .previous
17360
17361 #ifdef CONFIG_EARLY_PRINTK
17362+ __INITDATA
17363 early_recursion_flag:
17364 .long 0
17365+ .previous
17366
17367+ .section .rodata,"a",@progbits
17368 early_idt_msg:
17369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17370 early_idt_ripmsg:
17371 .asciz "RIP %s\n"
17372+ .previous
17373 #endif /* CONFIG_EARLY_PRINTK */
17374- .previous
17375
17376+ .section .rodata,"a",@progbits
17377 #define NEXT_PAGE(name) \
17378 .balign PAGE_SIZE; \
17379 ENTRY(name)
17380@@ -338,7 +348,6 @@ ENTRY(name)
17381 i = i + 1 ; \
17382 .endr
17383
17384- .data
17385 /*
17386 * This default setting generates an ident mapping at address 0x100000
17387 * and a mapping for the kernel that precisely maps virtual address
17388@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17392+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17393+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17394+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17395+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17396+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17397+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17398 .org init_level4_pgt + L4_START_KERNEL*8, 0
17399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17401
17402+#ifdef CONFIG_PAX_PER_CPU_PGD
17403+NEXT_PAGE(cpu_pgd)
17404+ .rept NR_CPUS
17405+ .fill 512,8,0
17406+ .endr
17407+#endif
17408+
17409 NEXT_PAGE(level3_ident_pgt)
17410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17411+#ifdef CONFIG_XEN
17412 .fill 511,8,0
17413+#else
17414+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17415+ .fill 510,8,0
17416+#endif
17417+
17418+NEXT_PAGE(level3_vmalloc_start_pgt)
17419+ .fill 512,8,0
17420+
17421+NEXT_PAGE(level3_vmalloc_end_pgt)
17422+ .fill 512,8,0
17423+
17424+NEXT_PAGE(level3_vmemmap_pgt)
17425+ .fill L3_VMEMMAP_START,8,0
17426+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17427
17428 NEXT_PAGE(level3_kernel_pgt)
17429 .fill L3_START_KERNEL,8,0
17430@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17433
17434+NEXT_PAGE(level2_vmemmap_pgt)
17435+ .fill 512,8,0
17436+
17437 NEXT_PAGE(level2_fixmap_pgt)
17438- .fill 506,8,0
17439- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17440- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17441- .fill 5,8,0
17442+ .fill 507,8,0
17443+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17444+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17445+ .fill 4,8,0
17446
17447-NEXT_PAGE(level1_fixmap_pgt)
17448+NEXT_PAGE(level1_vsyscall_pgt)
17449 .fill 512,8,0
17450
17451-NEXT_PAGE(level2_ident_pgt)
17452- /* Since I easily can, map the first 1G.
17453+ /* Since I easily can, map the first 2G.
17454 * Don't set NX because code runs from these pages.
17455 */
17456- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17457+NEXT_PAGE(level2_ident_pgt)
17458+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17459
17460 NEXT_PAGE(level2_kernel_pgt)
17461 /*
17462@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17463 * If you want to increase this then increase MODULES_VADDR
17464 * too.)
17465 */
17466- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17467- KERNEL_IMAGE_SIZE/PMD_SIZE)
17468-
17469-NEXT_PAGE(level2_spare_pgt)
17470- .fill 512, 8, 0
17471+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17472
17473 #undef PMDS
17474 #undef NEXT_PAGE
17475
17476- .data
17477+ .align PAGE_SIZE
17478+ENTRY(cpu_gdt_table)
17479+ .rept NR_CPUS
17480+ .quad 0x0000000000000000 /* NULL descriptor */
17481+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17482+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17483+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17484+ .quad 0x00cffb000000ffff /* __USER32_CS */
17485+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17486+ .quad 0x00affb000000ffff /* __USER_CS */
17487+
17488+#ifdef CONFIG_PAX_KERNEXEC
17489+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17490+#else
17491+ .quad 0x0 /* unused */
17492+#endif
17493+
17494+ .quad 0,0 /* TSS */
17495+ .quad 0,0 /* LDT */
17496+ .quad 0,0,0 /* three TLS descriptors */
17497+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17498+ /* asm/segment.h:GDT_ENTRIES must match this */
17499+
17500+ /* zero the remaining page */
17501+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17502+ .endr
17503+
17504 .align 16
17505 .globl early_gdt_descr
17506 early_gdt_descr:
17507 .word GDT_ENTRIES*8-1
17508 early_gdt_descr_base:
17509- .quad INIT_PER_CPU_VAR(gdt_page)
17510+ .quad cpu_gdt_table
17511
17512 ENTRY(phys_base)
17513 /* This must match the first entry in level2_kernel_pgt */
17514 .quad 0x0000000000000000
17515
17516 #include "../../x86/xen/xen-head.S"
17517-
17518- .section .bss, "aw", @nobits
17519+
17520+ .section .rodata,"a",@progbits
17521 .align L1_CACHE_BYTES
17522 ENTRY(idt_table)
17523- .skip IDT_ENTRIES * 16
17524+ .fill 512,8,0
17525
17526 .align L1_CACHE_BYTES
17527 ENTRY(nmi_idt_table)
17528- .skip IDT_ENTRIES * 16
17529+ .fill 512,8,0
17530
17531 __PAGE_ALIGNED_BSS
17532 .align PAGE_SIZE
17533diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17534index 9c3bd4a..e1d9b35 100644
17535--- a/arch/x86/kernel/i386_ksyms_32.c
17536+++ b/arch/x86/kernel/i386_ksyms_32.c
17537@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17538 EXPORT_SYMBOL(cmpxchg8b_emu);
17539 #endif
17540
17541+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17542+
17543 /* Networking helper routines. */
17544 EXPORT_SYMBOL(csum_partial_copy_generic);
17545+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17546+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17547
17548 EXPORT_SYMBOL(__get_user_1);
17549 EXPORT_SYMBOL(__get_user_2);
17550@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17551
17552 EXPORT_SYMBOL(csum_partial);
17553 EXPORT_SYMBOL(empty_zero_page);
17554+
17555+#ifdef CONFIG_PAX_KERNEXEC
17556+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17557+#endif
17558diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17559index 2d6e649..df6e1af 100644
17560--- a/arch/x86/kernel/i387.c
17561+++ b/arch/x86/kernel/i387.c
17562@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17563 static inline bool interrupted_user_mode(void)
17564 {
17565 struct pt_regs *regs = get_irq_regs();
17566- return regs && user_mode_vm(regs);
17567+ return regs && user_mode(regs);
17568 }
17569
17570 /*
17571diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17572index 36d1853..bf25736 100644
17573--- a/arch/x86/kernel/i8259.c
17574+++ b/arch/x86/kernel/i8259.c
17575@@ -209,7 +209,7 @@ spurious_8259A_irq:
17576 "spurious 8259A interrupt: IRQ%d.\n", irq);
17577 spurious_irq_mask |= irqmask;
17578 }
17579- atomic_inc(&irq_err_count);
17580+ atomic_inc_unchecked(&irq_err_count);
17581 /*
17582 * Theoretically we do not have to handle this IRQ,
17583 * but in Linux this does not cause problems and is
17584diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17585index 43e9ccf..44ccf6f 100644
17586--- a/arch/x86/kernel/init_task.c
17587+++ b/arch/x86/kernel/init_task.c
17588@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17589 * way process stacks are handled. This is done by having a special
17590 * "init_task" linker map entry..
17591 */
17592-union thread_union init_thread_union __init_task_data =
17593- { INIT_THREAD_INFO(init_task) };
17594+union thread_union init_thread_union __init_task_data;
17595
17596 /*
17597 * Initial task structure.
17598@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17599 * section. Since TSS's are completely CPU-local, we want them
17600 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17601 */
17602-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17603-
17604+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17605+EXPORT_SYMBOL(init_tss);
17606diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17607index 8c96897..be66bfa 100644
17608--- a/arch/x86/kernel/ioport.c
17609+++ b/arch/x86/kernel/ioport.c
17610@@ -6,6 +6,7 @@
17611 #include <linux/sched.h>
17612 #include <linux/kernel.h>
17613 #include <linux/capability.h>
17614+#include <linux/security.h>
17615 #include <linux/errno.h>
17616 #include <linux/types.h>
17617 #include <linux/ioport.h>
17618@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17619
17620 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17621 return -EINVAL;
17622+#ifdef CONFIG_GRKERNSEC_IO
17623+ if (turn_on && grsec_disable_privio) {
17624+ gr_handle_ioperm();
17625+ return -EPERM;
17626+ }
17627+#endif
17628 if (turn_on && !capable(CAP_SYS_RAWIO))
17629 return -EPERM;
17630
17631@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17632 * because the ->io_bitmap_max value must match the bitmap
17633 * contents:
17634 */
17635- tss = &per_cpu(init_tss, get_cpu());
17636+ tss = init_tss + get_cpu();
17637
17638 if (turn_on)
17639 bitmap_clear(t->io_bitmap_ptr, from, num);
17640@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17641 return -EINVAL;
17642 /* Trying to gain more privileges? */
17643 if (level > old) {
17644+#ifdef CONFIG_GRKERNSEC_IO
17645+ if (grsec_disable_privio) {
17646+ gr_handle_iopl();
17647+ return -EPERM;
17648+ }
17649+#endif
17650 if (!capable(CAP_SYS_RAWIO))
17651 return -EPERM;
17652 }
17653diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17654index 3dafc60..aa8e9c4 100644
17655--- a/arch/x86/kernel/irq.c
17656+++ b/arch/x86/kernel/irq.c
17657@@ -18,7 +18,7 @@
17658 #include <asm/mce.h>
17659 #include <asm/hw_irq.h>
17660
17661-atomic_t irq_err_count;
17662+atomic_unchecked_t irq_err_count;
17663
17664 /* Function pointer for generic interrupt vector handling */
17665 void (*x86_platform_ipi_callback)(void) = NULL;
17666@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17667 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17668 seq_printf(p, " Machine check polls\n");
17669 #endif
17670- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17671+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17672 #if defined(CONFIG_X86_IO_APIC)
17673- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17674+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17675 #endif
17676 return 0;
17677 }
17678@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17679
17680 u64 arch_irq_stat(void)
17681 {
17682- u64 sum = atomic_read(&irq_err_count);
17683+ u64 sum = atomic_read_unchecked(&irq_err_count);
17684
17685 #ifdef CONFIG_X86_IO_APIC
17686- sum += atomic_read(&irq_mis_count);
17687+ sum += atomic_read_unchecked(&irq_mis_count);
17688 #endif
17689 return sum;
17690 }
17691diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17692index 58b7f27..e112d08 100644
17693--- a/arch/x86/kernel/irq_32.c
17694+++ b/arch/x86/kernel/irq_32.c
17695@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17696 __asm__ __volatile__("andl %%esp,%0" :
17697 "=r" (sp) : "0" (THREAD_SIZE - 1));
17698
17699- return sp < (sizeof(struct thread_info) + STACK_WARN);
17700+ return sp < STACK_WARN;
17701 }
17702
17703 static void print_stack_overflow(void)
17704@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17705 * per-CPU IRQ handling contexts (thread information and stack)
17706 */
17707 union irq_ctx {
17708- struct thread_info tinfo;
17709- u32 stack[THREAD_SIZE/sizeof(u32)];
17710+ unsigned long previous_esp;
17711+ u32 stack[THREAD_SIZE/sizeof(u32)];
17712 } __attribute__((aligned(THREAD_SIZE)));
17713
17714 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17715@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17716 static inline int
17717 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17718 {
17719- union irq_ctx *curctx, *irqctx;
17720+ union irq_ctx *irqctx;
17721 u32 *isp, arg1, arg2;
17722
17723- curctx = (union irq_ctx *) current_thread_info();
17724 irqctx = __this_cpu_read(hardirq_ctx);
17725
17726 /*
17727@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17728 * handler) we can't do that and just have to keep using the
17729 * current stack (which is the irq stack already after all)
17730 */
17731- if (unlikely(curctx == irqctx))
17732+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17733 return 0;
17734
17735 /* build the stack frame on the IRQ stack */
17736- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17737- irqctx->tinfo.task = curctx->tinfo.task;
17738- irqctx->tinfo.previous_esp = current_stack_pointer;
17739+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17740+ irqctx->previous_esp = current_stack_pointer;
17741
17742- /* Copy the preempt_count so that the [soft]irq checks work. */
17743- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17744+#ifdef CONFIG_PAX_MEMORY_UDEREF
17745+ __set_fs(MAKE_MM_SEG(0));
17746+#endif
17747
17748 if (unlikely(overflow))
17749 call_on_stack(print_stack_overflow, isp);
17750@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17751 : "0" (irq), "1" (desc), "2" (isp),
17752 "D" (desc->handle_irq)
17753 : "memory", "cc", "ecx");
17754+
17755+#ifdef CONFIG_PAX_MEMORY_UDEREF
17756+ __set_fs(current_thread_info()->addr_limit);
17757+#endif
17758+
17759 return 1;
17760 }
17761
17762@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17763 */
17764 void __cpuinit irq_ctx_init(int cpu)
17765 {
17766- union irq_ctx *irqctx;
17767-
17768 if (per_cpu(hardirq_ctx, cpu))
17769 return;
17770
17771- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17772- THREAD_FLAGS,
17773- THREAD_ORDER));
17774- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17775- irqctx->tinfo.cpu = cpu;
17776- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17777- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17778-
17779- per_cpu(hardirq_ctx, cpu) = irqctx;
17780-
17781- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17782- THREAD_FLAGS,
17783- THREAD_ORDER));
17784- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17785- irqctx->tinfo.cpu = cpu;
17786- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17787-
17788- per_cpu(softirq_ctx, cpu) = irqctx;
17789+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17790+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17791
17792 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17793 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17794@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17795 asmlinkage void do_softirq(void)
17796 {
17797 unsigned long flags;
17798- struct thread_info *curctx;
17799 union irq_ctx *irqctx;
17800 u32 *isp;
17801
17802@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17803 local_irq_save(flags);
17804
17805 if (local_softirq_pending()) {
17806- curctx = current_thread_info();
17807 irqctx = __this_cpu_read(softirq_ctx);
17808- irqctx->tinfo.task = curctx->task;
17809- irqctx->tinfo.previous_esp = current_stack_pointer;
17810+ irqctx->previous_esp = current_stack_pointer;
17811
17812 /* build the stack frame on the softirq stack */
17813- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17814+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17815+
17816+#ifdef CONFIG_PAX_MEMORY_UDEREF
17817+ __set_fs(MAKE_MM_SEG(0));
17818+#endif
17819
17820 call_on_stack(__do_softirq, isp);
17821+
17822+#ifdef CONFIG_PAX_MEMORY_UDEREF
17823+ __set_fs(current_thread_info()->addr_limit);
17824+#endif
17825+
17826 /*
17827 * Shouldn't happen, we returned above if in_interrupt():
17828 */
17829@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17830 if (unlikely(!desc))
17831 return false;
17832
17833- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17834+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17835 if (unlikely(overflow))
17836 print_stack_overflow();
17837 desc->handle_irq(irq, desc);
17838diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17839index d04d3ec..ea4b374 100644
17840--- a/arch/x86/kernel/irq_64.c
17841+++ b/arch/x86/kernel/irq_64.c
17842@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17843 u64 estack_top, estack_bottom;
17844 u64 curbase = (u64)task_stack_page(current);
17845
17846- if (user_mode_vm(regs))
17847+ if (user_mode(regs))
17848 return;
17849
17850 if (regs->sp >= curbase + sizeof(struct thread_info) +
17851diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17852index 1d5d31e..ab846ed 100644
17853--- a/arch/x86/kernel/kdebugfs.c
17854+++ b/arch/x86/kernel/kdebugfs.c
17855@@ -28,6 +28,8 @@ struct setup_data_node {
17856 };
17857
17858 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17859+ size_t count, loff_t *ppos) __size_overflow(3);
17860+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17861 size_t count, loff_t *ppos)
17862 {
17863 struct setup_data_node *node = file->private_data;
17864diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17865index 8bfb614..2b3b35f 100644
17866--- a/arch/x86/kernel/kgdb.c
17867+++ b/arch/x86/kernel/kgdb.c
17868@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17869 #ifdef CONFIG_X86_32
17870 switch (regno) {
17871 case GDB_SS:
17872- if (!user_mode_vm(regs))
17873+ if (!user_mode(regs))
17874 *(unsigned long *)mem = __KERNEL_DS;
17875 break;
17876 case GDB_SP:
17877- if (!user_mode_vm(regs))
17878+ if (!user_mode(regs))
17879 *(unsigned long *)mem = kernel_stack_pointer(regs);
17880 break;
17881 case GDB_GS:
17882@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17883 case 'k':
17884 /* clear the trace bit */
17885 linux_regs->flags &= ~X86_EFLAGS_TF;
17886- atomic_set(&kgdb_cpu_doing_single_step, -1);
17887+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17888
17889 /* set the trace bit if we're stepping */
17890 if (remcomInBuffer[0] == 's') {
17891 linux_regs->flags |= X86_EFLAGS_TF;
17892- atomic_set(&kgdb_cpu_doing_single_step,
17893+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17894 raw_smp_processor_id());
17895 }
17896
17897@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17898
17899 switch (cmd) {
17900 case DIE_DEBUG:
17901- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17902+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17903 if (user_mode(regs))
17904 return single_step_cont(regs, args);
17905 break;
17906diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17907index c5e410e..da6aaf9 100644
17908--- a/arch/x86/kernel/kprobes-opt.c
17909+++ b/arch/x86/kernel/kprobes-opt.c
17910@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17911 * Verify if the address gap is in 2GB range, because this uses
17912 * a relative jump.
17913 */
17914- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17915+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17916 if (abs(rel) > 0x7fffffff)
17917 return -ERANGE;
17918
17919@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17920 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17921
17922 /* Set probe function call */
17923- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17924+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17925
17926 /* Set returning jmp instruction at the tail of out-of-line buffer */
17927 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17928- (u8 *)op->kp.addr + op->optinsn.size);
17929+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17930
17931 flush_icache_range((unsigned long) buf,
17932 (unsigned long) buf + TMPL_END_IDX +
17933@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17934 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17935
17936 /* Backup instructions which will be replaced by jump address */
17937- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17938+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17939 RELATIVE_ADDR_SIZE);
17940
17941 insn_buf[0] = RELATIVEJUMP_OPCODE;
17942diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17943index e213fc8..d783ba4 100644
17944--- a/arch/x86/kernel/kprobes.c
17945+++ b/arch/x86/kernel/kprobes.c
17946@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17947 } __attribute__((packed)) *insn;
17948
17949 insn = (struct __arch_relative_insn *)from;
17950+
17951+ pax_open_kernel();
17952 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17953 insn->op = op;
17954+ pax_close_kernel();
17955 }
17956
17957 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17958@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17959 kprobe_opcode_t opcode;
17960 kprobe_opcode_t *orig_opcodes = opcodes;
17961
17962- if (search_exception_tables((unsigned long)opcodes))
17963+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17964 return 0; /* Page fault may occur on this address. */
17965
17966 retry:
17967@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17968 /* Another subsystem puts a breakpoint, failed to recover */
17969 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17970 return 0;
17971+ pax_open_kernel();
17972 memcpy(dest, insn.kaddr, insn.length);
17973+ pax_close_kernel();
17974
17975 #ifdef CONFIG_X86_64
17976 if (insn_rip_relative(&insn)) {
17977@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17978 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17979 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17980 disp = (u8 *) dest + insn_offset_displacement(&insn);
17981+ pax_open_kernel();
17982 *(s32 *) disp = (s32) newdisp;
17983+ pax_close_kernel();
17984 }
17985 #endif
17986 return insn.length;
17987@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17988 * nor set current_kprobe, because it doesn't use single
17989 * stepping.
17990 */
17991- regs->ip = (unsigned long)p->ainsn.insn;
17992+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17993 preempt_enable_no_resched();
17994 return;
17995 }
17996@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17997 if (p->opcode == BREAKPOINT_INSTRUCTION)
17998 regs->ip = (unsigned long)p->addr;
17999 else
18000- regs->ip = (unsigned long)p->ainsn.insn;
18001+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18002 }
18003
18004 /*
18005@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18006 setup_singlestep(p, regs, kcb, 0);
18007 return 1;
18008 }
18009- } else if (*addr != BREAKPOINT_INSTRUCTION) {
18010+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18011 /*
18012 * The breakpoint instruction was removed right
18013 * after we hit it. Another cpu has removed
18014@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18015 " movq %rax, 152(%rsp)\n"
18016 RESTORE_REGS_STRING
18017 " popfq\n"
18018+#ifdef KERNEXEC_PLUGIN
18019+ " btsq $63,(%rsp)\n"
18020+#endif
18021 #else
18022 " pushf\n"
18023 SAVE_REGS_STRING
18024@@ -765,7 +775,7 @@ static void __kprobes
18025 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18026 {
18027 unsigned long *tos = stack_addr(regs);
18028- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18029+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18030 unsigned long orig_ip = (unsigned long)p->addr;
18031 kprobe_opcode_t *insn = p->ainsn.insn;
18032
18033@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18034 struct die_args *args = data;
18035 int ret = NOTIFY_DONE;
18036
18037- if (args->regs && user_mode_vm(args->regs))
18038+ if (args->regs && user_mode(args->regs))
18039 return ret;
18040
18041 switch (val) {
18042diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18043index ebc9873..1b9724b 100644
18044--- a/arch/x86/kernel/ldt.c
18045+++ b/arch/x86/kernel/ldt.c
18046@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18047 if (reload) {
18048 #ifdef CONFIG_SMP
18049 preempt_disable();
18050- load_LDT(pc);
18051+ load_LDT_nolock(pc);
18052 if (!cpumask_equal(mm_cpumask(current->mm),
18053 cpumask_of(smp_processor_id())))
18054 smp_call_function(flush_ldt, current->mm, 1);
18055 preempt_enable();
18056 #else
18057- load_LDT(pc);
18058+ load_LDT_nolock(pc);
18059 #endif
18060 }
18061 if (oldsize) {
18062@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18063 return err;
18064
18065 for (i = 0; i < old->size; i++)
18066- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18067+ write_ldt_entry(new->ldt, i, old->ldt + i);
18068 return 0;
18069 }
18070
18071@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18072 retval = copy_ldt(&mm->context, &old_mm->context);
18073 mutex_unlock(&old_mm->context.lock);
18074 }
18075+
18076+ if (tsk == current) {
18077+ mm->context.vdso = 0;
18078+
18079+#ifdef CONFIG_X86_32
18080+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18081+ mm->context.user_cs_base = 0UL;
18082+ mm->context.user_cs_limit = ~0UL;
18083+
18084+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18085+ cpus_clear(mm->context.cpu_user_cs_mask);
18086+#endif
18087+
18088+#endif
18089+#endif
18090+
18091+ }
18092+
18093 return retval;
18094 }
18095
18096@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18097 }
18098 }
18099
18100+#ifdef CONFIG_PAX_SEGMEXEC
18101+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18102+ error = -EINVAL;
18103+ goto out_unlock;
18104+ }
18105+#endif
18106+
18107 fill_ldt(&ldt, &ldt_info);
18108 if (oldmode)
18109 ldt.avl = 0;
18110diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18111index 5b19e4d..6476a76 100644
18112--- a/arch/x86/kernel/machine_kexec_32.c
18113+++ b/arch/x86/kernel/machine_kexec_32.c
18114@@ -26,7 +26,7 @@
18115 #include <asm/cacheflush.h>
18116 #include <asm/debugreg.h>
18117
18118-static void set_idt(void *newidt, __u16 limit)
18119+static void set_idt(struct desc_struct *newidt, __u16 limit)
18120 {
18121 struct desc_ptr curidt;
18122
18123@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18124 }
18125
18126
18127-static void set_gdt(void *newgdt, __u16 limit)
18128+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18129 {
18130 struct desc_ptr curgdt;
18131
18132@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18133 }
18134
18135 control_page = page_address(image->control_code_page);
18136- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18137+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18138
18139 relocate_kernel_ptr = control_page;
18140 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18141diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18142index 0327e2b..e43737b 100644
18143--- a/arch/x86/kernel/microcode_intel.c
18144+++ b/arch/x86/kernel/microcode_intel.c
18145@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18146
18147 static int get_ucode_user(void *to, const void *from, size_t n)
18148 {
18149- return copy_from_user(to, from, n);
18150+ return copy_from_user(to, (const void __force_user *)from, n);
18151 }
18152
18153 static enum ucode_state
18154 request_microcode_user(int cpu, const void __user *buf, size_t size)
18155 {
18156- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18157+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18158 }
18159
18160 static void microcode_fini_cpu(int cpu)
18161diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18162index f21fd94..61565cd 100644
18163--- a/arch/x86/kernel/module.c
18164+++ b/arch/x86/kernel/module.c
18165@@ -35,15 +35,60 @@
18166 #define DEBUGP(fmt...)
18167 #endif
18168
18169-void *module_alloc(unsigned long size)
18170+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18171 {
18172- if (PAGE_ALIGN(size) > MODULES_LEN)
18173+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18174 return NULL;
18175 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18176- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18177+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18178 -1, __builtin_return_address(0));
18179 }
18180
18181+void *module_alloc(unsigned long size)
18182+{
18183+
18184+#ifdef CONFIG_PAX_KERNEXEC
18185+ return __module_alloc(size, PAGE_KERNEL);
18186+#else
18187+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18188+#endif
18189+
18190+}
18191+
18192+#ifdef CONFIG_PAX_KERNEXEC
18193+#ifdef CONFIG_X86_32
18194+void *module_alloc_exec(unsigned long size)
18195+{
18196+ struct vm_struct *area;
18197+
18198+ if (size == 0)
18199+ return NULL;
18200+
18201+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18202+ return area ? area->addr : NULL;
18203+}
18204+EXPORT_SYMBOL(module_alloc_exec);
18205+
18206+void module_free_exec(struct module *mod, void *module_region)
18207+{
18208+ vunmap(module_region);
18209+}
18210+EXPORT_SYMBOL(module_free_exec);
18211+#else
18212+void module_free_exec(struct module *mod, void *module_region)
18213+{
18214+ module_free(mod, module_region);
18215+}
18216+EXPORT_SYMBOL(module_free_exec);
18217+
18218+void *module_alloc_exec(unsigned long size)
18219+{
18220+ return __module_alloc(size, PAGE_KERNEL_RX);
18221+}
18222+EXPORT_SYMBOL(module_alloc_exec);
18223+#endif
18224+#endif
18225+
18226 #ifdef CONFIG_X86_32
18227 int apply_relocate(Elf32_Shdr *sechdrs,
18228 const char *strtab,
18229@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18230 unsigned int i;
18231 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18232 Elf32_Sym *sym;
18233- uint32_t *location;
18234+ uint32_t *plocation, location;
18235
18236 DEBUGP("Applying relocate section %u to %u\n", relsec,
18237 sechdrs[relsec].sh_info);
18238 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18239 /* This is where to make the change */
18240- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18241- + rel[i].r_offset;
18242+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18243+ location = (uint32_t)plocation;
18244+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18245+ plocation = ktla_ktva((void *)plocation);
18246 /* This is the symbol it is referring to. Note that all
18247 undefined symbols have been resolved. */
18248 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18249@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18250 switch (ELF32_R_TYPE(rel[i].r_info)) {
18251 case R_386_32:
18252 /* We add the value into the location given */
18253- *location += sym->st_value;
18254+ pax_open_kernel();
18255+ *plocation += sym->st_value;
18256+ pax_close_kernel();
18257 break;
18258 case R_386_PC32:
18259 /* Add the value, subtract its postition */
18260- *location += sym->st_value - (uint32_t)location;
18261+ pax_open_kernel();
18262+ *plocation += sym->st_value - location;
18263+ pax_close_kernel();
18264 break;
18265 default:
18266 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18267@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18268 case R_X86_64_NONE:
18269 break;
18270 case R_X86_64_64:
18271+ pax_open_kernel();
18272 *(u64 *)loc = val;
18273+ pax_close_kernel();
18274 break;
18275 case R_X86_64_32:
18276+ pax_open_kernel();
18277 *(u32 *)loc = val;
18278+ pax_close_kernel();
18279 if (val != *(u32 *)loc)
18280 goto overflow;
18281 break;
18282 case R_X86_64_32S:
18283+ pax_open_kernel();
18284 *(s32 *)loc = val;
18285+ pax_close_kernel();
18286 if ((s64)val != *(s32 *)loc)
18287 goto overflow;
18288 break;
18289 case R_X86_64_PC32:
18290 val -= (u64)loc;
18291+ pax_open_kernel();
18292 *(u32 *)loc = val;
18293+ pax_close_kernel();
18294+
18295 #if 0
18296 if ((s64)val != *(s32 *)loc)
18297 goto overflow;
18298diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18299index 32856fa..ce95eaa 100644
18300--- a/arch/x86/kernel/nmi.c
18301+++ b/arch/x86/kernel/nmi.c
18302@@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18303 dotraplinkage notrace __kprobes void
18304 do_nmi(struct pt_regs *regs, long error_code)
18305 {
18306+
18307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18308+ if (!user_mode(regs)) {
18309+ unsigned long cs = regs->cs & 0xFFFF;
18310+ unsigned long ip = ktva_ktla(regs->ip);
18311+
18312+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18313+ regs->ip = ip;
18314+ }
18315+#endif
18316+
18317 nmi_nesting_preprocess(regs);
18318
18319 nmi_enter();
18320diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18321index 676b8c7..870ba04 100644
18322--- a/arch/x86/kernel/paravirt-spinlocks.c
18323+++ b/arch/x86/kernel/paravirt-spinlocks.c
18324@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18325 arch_spin_lock(lock);
18326 }
18327
18328-struct pv_lock_ops pv_lock_ops = {
18329+struct pv_lock_ops pv_lock_ops __read_only = {
18330 #ifdef CONFIG_SMP
18331 .spin_is_locked = __ticket_spin_is_locked,
18332 .spin_is_contended = __ticket_spin_is_contended,
18333diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18334index ab13760..01218e0 100644
18335--- a/arch/x86/kernel/paravirt.c
18336+++ b/arch/x86/kernel/paravirt.c
18337@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18338 {
18339 return x;
18340 }
18341+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18342+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18343+#endif
18344
18345 void __init default_banner(void)
18346 {
18347@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18348 if (opfunc == NULL)
18349 /* If there's no function, patch it with a ud2a (BUG) */
18350 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18351- else if (opfunc == _paravirt_nop)
18352+ else if (opfunc == (void *)_paravirt_nop)
18353 /* If the operation is a nop, then nop the callsite */
18354 ret = paravirt_patch_nop();
18355
18356 /* identity functions just return their single argument */
18357- else if (opfunc == _paravirt_ident_32)
18358+ else if (opfunc == (void *)_paravirt_ident_32)
18359 ret = paravirt_patch_ident_32(insnbuf, len);
18360- else if (opfunc == _paravirt_ident_64)
18361+ else if (opfunc == (void *)_paravirt_ident_64)
18362 ret = paravirt_patch_ident_64(insnbuf, len);
18363+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18364+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18365+ ret = paravirt_patch_ident_64(insnbuf, len);
18366+#endif
18367
18368 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18369 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18370@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18371 if (insn_len > len || start == NULL)
18372 insn_len = len;
18373 else
18374- memcpy(insnbuf, start, insn_len);
18375+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18376
18377 return insn_len;
18378 }
18379@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18380 preempt_enable();
18381 }
18382
18383-struct pv_info pv_info = {
18384+struct pv_info pv_info __read_only = {
18385 .name = "bare hardware",
18386 .paravirt_enabled = 0,
18387 .kernel_rpl = 0,
18388@@ -315,16 +322,16 @@ struct pv_info pv_info = {
18389 #endif
18390 };
18391
18392-struct pv_init_ops pv_init_ops = {
18393+struct pv_init_ops pv_init_ops __read_only = {
18394 .patch = native_patch,
18395 };
18396
18397-struct pv_time_ops pv_time_ops = {
18398+struct pv_time_ops pv_time_ops __read_only = {
18399 .sched_clock = native_sched_clock,
18400 .steal_clock = native_steal_clock,
18401 };
18402
18403-struct pv_irq_ops pv_irq_ops = {
18404+struct pv_irq_ops pv_irq_ops __read_only = {
18405 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18406 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18407 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18408@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18409 #endif
18410 };
18411
18412-struct pv_cpu_ops pv_cpu_ops = {
18413+struct pv_cpu_ops pv_cpu_ops __read_only = {
18414 .cpuid = native_cpuid,
18415 .get_debugreg = native_get_debugreg,
18416 .set_debugreg = native_set_debugreg,
18417@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18418 .end_context_switch = paravirt_nop,
18419 };
18420
18421-struct pv_apic_ops pv_apic_ops = {
18422+struct pv_apic_ops pv_apic_ops __read_only = {
18423 #ifdef CONFIG_X86_LOCAL_APIC
18424 .startup_ipi_hook = paravirt_nop,
18425 #endif
18426 };
18427
18428-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18429+#ifdef CONFIG_X86_32
18430+#ifdef CONFIG_X86_PAE
18431+/* 64-bit pagetable entries */
18432+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18433+#else
18434 /* 32-bit pagetable entries */
18435 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18436+#endif
18437 #else
18438 /* 64-bit pagetable entries */
18439 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18440 #endif
18441
18442-struct pv_mmu_ops pv_mmu_ops = {
18443+struct pv_mmu_ops pv_mmu_ops __read_only = {
18444
18445 .read_cr2 = native_read_cr2,
18446 .write_cr2 = native_write_cr2,
18447@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18448 .make_pud = PTE_IDENT,
18449
18450 .set_pgd = native_set_pgd,
18451+ .set_pgd_batched = native_set_pgd_batched,
18452 #endif
18453 #endif /* PAGETABLE_LEVELS >= 3 */
18454
18455@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18456 },
18457
18458 .set_fixmap = native_set_fixmap,
18459+
18460+#ifdef CONFIG_PAX_KERNEXEC
18461+ .pax_open_kernel = native_pax_open_kernel,
18462+ .pax_close_kernel = native_pax_close_kernel,
18463+#endif
18464+
18465 };
18466
18467 EXPORT_SYMBOL_GPL(pv_time_ops);
18468diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18469index 35ccf75..7a15747 100644
18470--- a/arch/x86/kernel/pci-iommu_table.c
18471+++ b/arch/x86/kernel/pci-iommu_table.c
18472@@ -2,7 +2,7 @@
18473 #include <asm/iommu_table.h>
18474 #include <linux/string.h>
18475 #include <linux/kallsyms.h>
18476-
18477+#include <linux/sched.h>
18478
18479 #define DEBUG 1
18480
18481diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18482index 1d92a5a..7bc8c29 100644
18483--- a/arch/x86/kernel/process.c
18484+++ b/arch/x86/kernel/process.c
18485@@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18486
18487 void free_thread_info(struct thread_info *ti)
18488 {
18489- free_thread_xstate(ti->task);
18490 free_pages((unsigned long)ti, THREAD_ORDER);
18491 }
18492
18493+static struct kmem_cache *task_struct_cachep;
18494+
18495 void arch_task_cache_init(void)
18496 {
18497- task_xstate_cachep =
18498- kmem_cache_create("task_xstate", xstate_size,
18499+ /* create a slab on which task_structs can be allocated */
18500+ task_struct_cachep =
18501+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18502+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18503+
18504+ task_xstate_cachep =
18505+ kmem_cache_create("task_xstate", xstate_size,
18506 __alignof__(union thread_xstate),
18507- SLAB_PANIC | SLAB_NOTRACK, NULL);
18508+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18509+}
18510+
18511+struct task_struct *alloc_task_struct_node(int node)
18512+{
18513+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18514+}
18515+
18516+void free_task_struct(struct task_struct *task)
18517+{
18518+ free_thread_xstate(task);
18519+ kmem_cache_free(task_struct_cachep, task);
18520 }
18521
18522 /*
18523@@ -91,7 +108,7 @@ void exit_thread(void)
18524 unsigned long *bp = t->io_bitmap_ptr;
18525
18526 if (bp) {
18527- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18528+ struct tss_struct *tss = init_tss + get_cpu();
18529
18530 t->io_bitmap_ptr = NULL;
18531 clear_thread_flag(TIF_IO_BITMAP);
18532@@ -127,7 +144,7 @@ void show_regs_common(void)
18533
18534 printk(KERN_CONT "\n");
18535 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18536- current->pid, current->comm, print_tainted(),
18537+ task_pid_nr(current), current->comm, print_tainted(),
18538 init_utsname()->release,
18539 (int)strcspn(init_utsname()->version, " "),
18540 init_utsname()->version);
18541@@ -141,6 +158,9 @@ void flush_thread(void)
18542 {
18543 struct task_struct *tsk = current;
18544
18545+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18546+ loadsegment(gs, 0);
18547+#endif
18548 flush_ptrace_hw_breakpoint(tsk);
18549 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18550 /*
18551@@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18552 regs.di = (unsigned long) arg;
18553
18554 #ifdef CONFIG_X86_32
18555- regs.ds = __USER_DS;
18556- regs.es = __USER_DS;
18557+ regs.ds = __KERNEL_DS;
18558+ regs.es = __KERNEL_DS;
18559 regs.fs = __KERNEL_PERCPU;
18560- regs.gs = __KERNEL_STACK_CANARY;
18561+ savesegment(gs, regs.gs);
18562 #else
18563 regs.ss = __KERNEL_DS;
18564 #endif
18565@@ -392,7 +412,7 @@ static void __exit_idle(void)
18566 void exit_idle(void)
18567 {
18568 /* idle loop has pid 0 */
18569- if (current->pid)
18570+ if (task_pid_nr(current))
18571 return;
18572 __exit_idle();
18573 }
18574@@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18575
18576 return ret;
18577 }
18578-void stop_this_cpu(void *dummy)
18579+__noreturn void stop_this_cpu(void *dummy)
18580 {
18581 local_irq_disable();
18582 /*
18583@@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18584 }
18585 early_param("idle", idle_setup);
18586
18587-unsigned long arch_align_stack(unsigned long sp)
18588+#ifdef CONFIG_PAX_RANDKSTACK
18589+void pax_randomize_kstack(struct pt_regs *regs)
18590 {
18591- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18592- sp -= get_random_int() % 8192;
18593- return sp & ~0xf;
18594-}
18595+ struct thread_struct *thread = &current->thread;
18596+ unsigned long time;
18597
18598-unsigned long arch_randomize_brk(struct mm_struct *mm)
18599-{
18600- unsigned long range_end = mm->brk + 0x02000000;
18601- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18602-}
18603+ if (!randomize_va_space)
18604+ return;
18605+
18606+ if (v8086_mode(regs))
18607+ return;
18608
18609+ rdtscl(time);
18610+
18611+ /* P4 seems to return a 0 LSB, ignore it */
18612+#ifdef CONFIG_MPENTIUM4
18613+ time &= 0x3EUL;
18614+ time <<= 2;
18615+#elif defined(CONFIG_X86_64)
18616+ time &= 0xFUL;
18617+ time <<= 4;
18618+#else
18619+ time &= 0x1FUL;
18620+ time <<= 3;
18621+#endif
18622+
18623+ thread->sp0 ^= time;
18624+ load_sp0(init_tss + smp_processor_id(), thread);
18625+
18626+#ifdef CONFIG_X86_64
18627+ percpu_write(kernel_stack, thread->sp0);
18628+#endif
18629+}
18630+#endif
18631diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18632index ae68473..7b0bb71 100644
18633--- a/arch/x86/kernel/process_32.c
18634+++ b/arch/x86/kernel/process_32.c
18635@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18636 unsigned long thread_saved_pc(struct task_struct *tsk)
18637 {
18638 return ((unsigned long *)tsk->thread.sp)[3];
18639+//XXX return tsk->thread.eip;
18640 }
18641
18642 void __show_regs(struct pt_regs *regs, int all)
18643@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18644 unsigned long sp;
18645 unsigned short ss, gs;
18646
18647- if (user_mode_vm(regs)) {
18648+ if (user_mode(regs)) {
18649 sp = regs->sp;
18650 ss = regs->ss & 0xffff;
18651- gs = get_user_gs(regs);
18652 } else {
18653 sp = kernel_stack_pointer(regs);
18654 savesegment(ss, ss);
18655- savesegment(gs, gs);
18656 }
18657+ gs = get_user_gs(regs);
18658
18659 show_regs_common();
18660
18661@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18662 struct task_struct *tsk;
18663 int err;
18664
18665- childregs = task_pt_regs(p);
18666+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18667 *childregs = *regs;
18668 childregs->ax = 0;
18669 childregs->sp = sp;
18670
18671 p->thread.sp = (unsigned long) childregs;
18672 p->thread.sp0 = (unsigned long) (childregs+1);
18673+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18674
18675 p->thread.ip = (unsigned long) ret_from_fork;
18676
18677@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18678 struct thread_struct *prev = &prev_p->thread,
18679 *next = &next_p->thread;
18680 int cpu = smp_processor_id();
18681- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18682+ struct tss_struct *tss = init_tss + cpu;
18683 fpu_switch_t fpu;
18684
18685 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18686@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18687 */
18688 lazy_save_gs(prev->gs);
18689
18690+#ifdef CONFIG_PAX_MEMORY_UDEREF
18691+ __set_fs(task_thread_info(next_p)->addr_limit);
18692+#endif
18693+
18694 /*
18695 * Load the per-thread Thread-Local Storage descriptor.
18696 */
18697@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18698 */
18699 arch_end_context_switch(next_p);
18700
18701+ percpu_write(current_task, next_p);
18702+ percpu_write(current_tinfo, &next_p->tinfo);
18703+
18704 /*
18705 * Restore %gs if needed (which is common)
18706 */
18707@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18708
18709 switch_fpu_finish(next_p, fpu);
18710
18711- percpu_write(current_task, next_p);
18712-
18713 return prev_p;
18714 }
18715
18716@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18717 } while (count++ < 16);
18718 return 0;
18719 }
18720-
18721diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18722index 43d8b48..c45d566 100644
18723--- a/arch/x86/kernel/process_64.c
18724+++ b/arch/x86/kernel/process_64.c
18725@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18726 struct pt_regs *childregs;
18727 struct task_struct *me = current;
18728
18729- childregs = ((struct pt_regs *)
18730- (THREAD_SIZE + task_stack_page(p))) - 1;
18731+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18732 *childregs = *regs;
18733
18734 childregs->ax = 0;
18735@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18736 p->thread.sp = (unsigned long) childregs;
18737 p->thread.sp0 = (unsigned long) (childregs+1);
18738 p->thread.usersp = me->thread.usersp;
18739+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18740
18741 set_tsk_thread_flag(p, TIF_FORK);
18742
18743@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18744 struct thread_struct *prev = &prev_p->thread;
18745 struct thread_struct *next = &next_p->thread;
18746 int cpu = smp_processor_id();
18747- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18748+ struct tss_struct *tss = init_tss + cpu;
18749 unsigned fsindex, gsindex;
18750 fpu_switch_t fpu;
18751
18752@@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18753 prev->usersp = percpu_read(old_rsp);
18754 percpu_write(old_rsp, next->usersp);
18755 percpu_write(current_task, next_p);
18756+ percpu_write(current_tinfo, &next_p->tinfo);
18757
18758- percpu_write(kernel_stack,
18759- (unsigned long)task_stack_page(next_p) +
18760- THREAD_SIZE - KERNEL_STACK_OFFSET);
18761+ percpu_write(kernel_stack, next->sp0);
18762
18763 /*
18764 * Now maybe reload the debug registers and handle I/O bitmaps
18765@@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18766 if (!p || p == current || p->state == TASK_RUNNING)
18767 return 0;
18768 stack = (unsigned long)task_stack_page(p);
18769- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18770+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18771 return 0;
18772 fp = *(u64 *)(p->thread.sp);
18773 do {
18774- if (fp < (unsigned long)stack ||
18775- fp >= (unsigned long)stack+THREAD_SIZE)
18776+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18777 return 0;
18778 ip = *(u64 *)(fp+8);
18779 if (!in_sched_functions(ip))
18780diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18781index cf11783..e7ce551 100644
18782--- a/arch/x86/kernel/ptrace.c
18783+++ b/arch/x86/kernel/ptrace.c
18784@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18785 unsigned long addr, unsigned long data)
18786 {
18787 int ret;
18788- unsigned long __user *datap = (unsigned long __user *)data;
18789+ unsigned long __user *datap = (__force unsigned long __user *)data;
18790
18791 switch (request) {
18792 /* read the word at location addr in the USER area. */
18793@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18794 if ((int) addr < 0)
18795 return -EIO;
18796 ret = do_get_thread_area(child, addr,
18797- (struct user_desc __user *)data);
18798+ (__force struct user_desc __user *) data);
18799 break;
18800
18801 case PTRACE_SET_THREAD_AREA:
18802 if ((int) addr < 0)
18803 return -EIO;
18804 ret = do_set_thread_area(child, addr,
18805- (struct user_desc __user *)data, 0);
18806+ (__force struct user_desc __user *) data, 0);
18807 break;
18808 #endif
18809
18810@@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18811 memset(info, 0, sizeof(*info));
18812 info->si_signo = SIGTRAP;
18813 info->si_code = si_code;
18814- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18815+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18816 }
18817
18818 void user_single_step_siginfo(struct task_struct *tsk,
18819@@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18820 # define IS_IA32 0
18821 #endif
18822
18823+#ifdef CONFIG_GRKERNSEC_SETXID
18824+extern void gr_delayed_cred_worker(void);
18825+#endif
18826+
18827 /*
18828 * We must return the syscall number to actually look up in the table.
18829 * This can be -1L to skip running any syscall at all.
18830@@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18831 {
18832 long ret = 0;
18833
18834+#ifdef CONFIG_GRKERNSEC_SETXID
18835+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18836+ gr_delayed_cred_worker();
18837+#endif
18838+
18839 /*
18840 * If we stepped into a sysenter/syscall insn, it trapped in
18841 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18842@@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18843 {
18844 bool step;
18845
18846+#ifdef CONFIG_GRKERNSEC_SETXID
18847+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18848+ gr_delayed_cred_worker();
18849+#endif
18850+
18851 audit_syscall_exit(regs);
18852
18853 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18854diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18855index 42eb330..139955c 100644
18856--- a/arch/x86/kernel/pvclock.c
18857+++ b/arch/x86/kernel/pvclock.c
18858@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18859 return pv_tsc_khz;
18860 }
18861
18862-static atomic64_t last_value = ATOMIC64_INIT(0);
18863+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18864
18865 void pvclock_resume(void)
18866 {
18867- atomic64_set(&last_value, 0);
18868+ atomic64_set_unchecked(&last_value, 0);
18869 }
18870
18871 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18872@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18873 * updating at the same time, and one of them could be slightly behind,
18874 * making the assumption that last_value always go forward fail to hold.
18875 */
18876- last = atomic64_read(&last_value);
18877+ last = atomic64_read_unchecked(&last_value);
18878 do {
18879 if (ret < last)
18880 return last;
18881- last = atomic64_cmpxchg(&last_value, last, ret);
18882+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18883 } while (unlikely(last != ret));
18884
18885 return ret;
18886diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18887index 3034ee5..7cfbfa6 100644
18888--- a/arch/x86/kernel/reboot.c
18889+++ b/arch/x86/kernel/reboot.c
18890@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18891 EXPORT_SYMBOL(pm_power_off);
18892
18893 static const struct desc_ptr no_idt = {};
18894-static int reboot_mode;
18895+static unsigned short reboot_mode;
18896 enum reboot_type reboot_type = BOOT_ACPI;
18897 int reboot_force;
18898
18899@@ -335,13 +335,17 @@ core_initcall(reboot_init);
18900 extern const unsigned char machine_real_restart_asm[];
18901 extern const u64 machine_real_restart_gdt[3];
18902
18903-void machine_real_restart(unsigned int type)
18904+__noreturn void machine_real_restart(unsigned int type)
18905 {
18906 void *restart_va;
18907 unsigned long restart_pa;
18908- void (*restart_lowmem)(unsigned int);
18909+ void (* __noreturn restart_lowmem)(unsigned int);
18910 u64 *lowmem_gdt;
18911
18912+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18913+ struct desc_struct *gdt;
18914+#endif
18915+
18916 local_irq_disable();
18917
18918 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18919@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18920 boot)". This seems like a fairly standard thing that gets set by
18921 REBOOT.COM programs, and the previous reset routine did this
18922 too. */
18923- *((unsigned short *)0x472) = reboot_mode;
18924+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18925
18926 /* Patch the GDT in the low memory trampoline */
18927 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18928
18929 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18930 restart_pa = virt_to_phys(restart_va);
18931- restart_lowmem = (void (*)(unsigned int))restart_pa;
18932+ restart_lowmem = (void *)restart_pa;
18933
18934 /* GDT[0]: GDT self-pointer */
18935 lowmem_gdt[0] =
18936@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18937 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18938
18939 /* Jump to the identity-mapped low memory code */
18940+
18941+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18942+ gdt = get_cpu_gdt_table(smp_processor_id());
18943+ pax_open_kernel();
18944+#ifdef CONFIG_PAX_MEMORY_UDEREF
18945+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18946+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18947+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18948+#endif
18949+#ifdef CONFIG_PAX_KERNEXEC
18950+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18951+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18952+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18953+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18954+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18955+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18956+#endif
18957+ pax_close_kernel();
18958+#endif
18959+
18960+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18961+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18962+ unreachable();
18963+#else
18964 restart_lowmem(type);
18965+#endif
18966+
18967 }
18968 #ifdef CONFIG_APM_MODULE
18969 EXPORT_SYMBOL(machine_real_restart);
18970@@ -564,7 +594,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18971 * try to force a triple fault and then cycle between hitting the keyboard
18972 * controller and doing that
18973 */
18974-static void native_machine_emergency_restart(void)
18975+__noreturn static void native_machine_emergency_restart(void)
18976 {
18977 int i;
18978 int attempt = 0;
18979@@ -688,13 +718,13 @@ void native_machine_shutdown(void)
18980 #endif
18981 }
18982
18983-static void __machine_emergency_restart(int emergency)
18984+static __noreturn void __machine_emergency_restart(int emergency)
18985 {
18986 reboot_emergency = emergency;
18987 machine_ops.emergency_restart();
18988 }
18989
18990-static void native_machine_restart(char *__unused)
18991+static __noreturn void native_machine_restart(char *__unused)
18992 {
18993 printk("machine restart\n");
18994
18995@@ -703,7 +733,7 @@ static void native_machine_restart(char *__unused)
18996 __machine_emergency_restart(0);
18997 }
18998
18999-static void native_machine_halt(void)
19000+static __noreturn void native_machine_halt(void)
19001 {
19002 /* stop other cpus and apics */
19003 machine_shutdown();
19004@@ -714,7 +744,7 @@ static void native_machine_halt(void)
19005 stop_this_cpu(NULL);
19006 }
19007
19008-static void native_machine_power_off(void)
19009+__noreturn static void native_machine_power_off(void)
19010 {
19011 if (pm_power_off) {
19012 if (!reboot_force)
19013@@ -723,6 +753,7 @@ static void native_machine_power_off(void)
19014 }
19015 /* a fallback in case there is no PM info available */
19016 tboot_shutdown(TB_SHUTDOWN_HALT);
19017+ unreachable();
19018 }
19019
19020 struct machine_ops machine_ops = {
19021diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19022index 7a6f3b3..bed145d7 100644
19023--- a/arch/x86/kernel/relocate_kernel_64.S
19024+++ b/arch/x86/kernel/relocate_kernel_64.S
19025@@ -11,6 +11,7 @@
19026 #include <asm/kexec.h>
19027 #include <asm/processor-flags.h>
19028 #include <asm/pgtable_types.h>
19029+#include <asm/alternative-asm.h>
19030
19031 /*
19032 * Must be relocatable PIC code callable as a C function
19033@@ -160,13 +161,14 @@ identity_mapped:
19034 xorq %rbp, %rbp
19035 xorq %r8, %r8
19036 xorq %r9, %r9
19037- xorq %r10, %r9
19038+ xorq %r10, %r10
19039 xorq %r11, %r11
19040 xorq %r12, %r12
19041 xorq %r13, %r13
19042 xorq %r14, %r14
19043 xorq %r15, %r15
19044
19045+ pax_force_retaddr 0, 1
19046 ret
19047
19048 1:
19049diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19050index 1a29015..712f324 100644
19051--- a/arch/x86/kernel/setup.c
19052+++ b/arch/x86/kernel/setup.c
19053@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
19054
19055 switch (data->type) {
19056 case SETUP_E820_EXT:
19057- parse_e820_ext(data);
19058+ parse_e820_ext((struct setup_data __force_kernel *)data);
19059 break;
19060 case SETUP_DTB:
19061 add_dtb(pa_data);
19062@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
19063 * area (640->1Mb) as ram even though it is not.
19064 * take them out.
19065 */
19066- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19067+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19068 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19069 }
19070
19071@@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19072
19073 if (!boot_params.hdr.root_flags)
19074 root_mountflags &= ~MS_RDONLY;
19075- init_mm.start_code = (unsigned long) _text;
19076- init_mm.end_code = (unsigned long) _etext;
19077+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19078+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19079 init_mm.end_data = (unsigned long) _edata;
19080 init_mm.brk = _brk_end;
19081
19082- code_resource.start = virt_to_phys(_text);
19083- code_resource.end = virt_to_phys(_etext)-1;
19084- data_resource.start = virt_to_phys(_etext);
19085+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19086+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19087+ data_resource.start = virt_to_phys(_sdata);
19088 data_resource.end = virt_to_phys(_edata)-1;
19089 bss_resource.start = virt_to_phys(&__bss_start);
19090 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19091diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19092index 5a98aa2..2f9288d 100644
19093--- a/arch/x86/kernel/setup_percpu.c
19094+++ b/arch/x86/kernel/setup_percpu.c
19095@@ -21,19 +21,17 @@
19096 #include <asm/cpu.h>
19097 #include <asm/stackprotector.h>
19098
19099-DEFINE_PER_CPU(int, cpu_number);
19100+#ifdef CONFIG_SMP
19101+DEFINE_PER_CPU(unsigned int, cpu_number);
19102 EXPORT_PER_CPU_SYMBOL(cpu_number);
19103+#endif
19104
19105-#ifdef CONFIG_X86_64
19106 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19107-#else
19108-#define BOOT_PERCPU_OFFSET 0
19109-#endif
19110
19111 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19112 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19113
19114-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19115+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19116 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19117 };
19118 EXPORT_SYMBOL(__per_cpu_offset);
19119@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19120 {
19121 #ifdef CONFIG_X86_32
19122 struct desc_struct gdt;
19123+ unsigned long base = per_cpu_offset(cpu);
19124
19125- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19126- 0x2 | DESCTYPE_S, 0x8);
19127- gdt.s = 1;
19128+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19129+ 0x83 | DESCTYPE_S, 0xC);
19130 write_gdt_entry(get_cpu_gdt_table(cpu),
19131 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19132 #endif
19133@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19134 /* alrighty, percpu areas up and running */
19135 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19136 for_each_possible_cpu(cpu) {
19137+#ifdef CONFIG_CC_STACKPROTECTOR
19138+#ifdef CONFIG_X86_32
19139+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19140+#endif
19141+#endif
19142 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19143 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19144 per_cpu(cpu_number, cpu) = cpu;
19145@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19146 */
19147 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19148 #endif
19149+#ifdef CONFIG_CC_STACKPROTECTOR
19150+#ifdef CONFIG_X86_32
19151+ if (!cpu)
19152+ per_cpu(stack_canary.canary, cpu) = canary;
19153+#endif
19154+#endif
19155 /*
19156 * Up to this point, the boot CPU has been using .init.data
19157 * area. Reload any changed state for the boot CPU.
19158diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19159index 115eac4..c0591d5 100644
19160--- a/arch/x86/kernel/signal.c
19161+++ b/arch/x86/kernel/signal.c
19162@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19163 * Align the stack pointer according to the i386 ABI,
19164 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19165 */
19166- sp = ((sp + 4) & -16ul) - 4;
19167+ sp = ((sp - 12) & -16ul) - 4;
19168 #else /* !CONFIG_X86_32 */
19169 sp = round_down(sp, 16) - 8;
19170 #endif
19171@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19172 * Return an always-bogus address instead so we will die with SIGSEGV.
19173 */
19174 if (onsigstack && !likely(on_sig_stack(sp)))
19175- return (void __user *)-1L;
19176+ return (__force void __user *)-1L;
19177
19178 /* save i387 state */
19179 if (used_math() && save_i387_xstate(*fpstate) < 0)
19180- return (void __user *)-1L;
19181+ return (__force void __user *)-1L;
19182
19183 return (void __user *)sp;
19184 }
19185@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19186 }
19187
19188 if (current->mm->context.vdso)
19189- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19190+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19191 else
19192- restorer = &frame->retcode;
19193+ restorer = (void __user *)&frame->retcode;
19194 if (ka->sa.sa_flags & SA_RESTORER)
19195 restorer = ka->sa.sa_restorer;
19196
19197@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19198 * reasons and because gdb uses it as a signature to notice
19199 * signal handler stack frames.
19200 */
19201- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19202+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19203
19204 if (err)
19205 return -EFAULT;
19206@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19207 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19208
19209 /* Set up to return from userspace. */
19210- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19211+ if (current->mm->context.vdso)
19212+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19213+ else
19214+ restorer = (void __user *)&frame->retcode;
19215 if (ka->sa.sa_flags & SA_RESTORER)
19216 restorer = ka->sa.sa_restorer;
19217 put_user_ex(restorer, &frame->pretcode);
19218@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19219 * reasons and because gdb uses it as a signature to notice
19220 * signal handler stack frames.
19221 */
19222- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19223+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19224 } put_user_catch(err);
19225
19226 if (err)
19227@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19228 * X86_32: vm86 regs switched out by assembly code before reaching
19229 * here, so testing against kernel CS suffices.
19230 */
19231- if (!user_mode(regs))
19232+ if (!user_mode_novm(regs))
19233 return;
19234
19235 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19236diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19237index 6e1e406..edfb7cb 100644
19238--- a/arch/x86/kernel/smpboot.c
19239+++ b/arch/x86/kernel/smpboot.c
19240@@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19241 set_idle_for_cpu(cpu, c_idle.idle);
19242 do_rest:
19243 per_cpu(current_task, cpu) = c_idle.idle;
19244+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19245 #ifdef CONFIG_X86_32
19246 /* Stack for startup_32 can be just as for start_secondary onwards */
19247 irq_ctx_init(cpu);
19248 #else
19249 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19250 initial_gs = per_cpu_offset(cpu);
19251- per_cpu(kernel_stack, cpu) =
19252- (unsigned long)task_stack_page(c_idle.idle) -
19253- KERNEL_STACK_OFFSET + THREAD_SIZE;
19254+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19255 #endif
19256+
19257+ pax_open_kernel();
19258 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19259+ pax_close_kernel();
19260+
19261 initial_code = (unsigned long)start_secondary;
19262 stack_start = c_idle.idle->thread.sp;
19263
19264@@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19265
19266 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19267
19268+#ifdef CONFIG_PAX_PER_CPU_PGD
19269+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19270+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19271+ KERNEL_PGD_PTRS);
19272+#endif
19273+
19274 err = do_boot_cpu(apicid, cpu);
19275 if (err) {
19276 pr_debug("do_boot_cpu failed %d\n", err);
19277diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19278index c346d11..d43b163 100644
19279--- a/arch/x86/kernel/step.c
19280+++ b/arch/x86/kernel/step.c
19281@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19282 struct desc_struct *desc;
19283 unsigned long base;
19284
19285- seg &= ~7UL;
19286+ seg >>= 3;
19287
19288 mutex_lock(&child->mm->context.lock);
19289- if (unlikely((seg >> 3) >= child->mm->context.size))
19290+ if (unlikely(seg >= child->mm->context.size))
19291 addr = -1L; /* bogus selector, access would fault */
19292 else {
19293 desc = child->mm->context.ldt + seg;
19294@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19295 addr += base;
19296 }
19297 mutex_unlock(&child->mm->context.lock);
19298- }
19299+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19300+ addr = ktla_ktva(addr);
19301
19302 return addr;
19303 }
19304@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19305 unsigned char opcode[15];
19306 unsigned long addr = convert_ip_to_linear(child, regs);
19307
19308+ if (addr == -EINVAL)
19309+ return 0;
19310+
19311 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19312 for (i = 0; i < copied; i++) {
19313 switch (opcode[i]) {
19314diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19315index 0b0cb5f..db6b9ed 100644
19316--- a/arch/x86/kernel/sys_i386_32.c
19317+++ b/arch/x86/kernel/sys_i386_32.c
19318@@ -24,17 +24,224 @@
19319
19320 #include <asm/syscalls.h>
19321
19322-/*
19323- * Do a system call from kernel instead of calling sys_execve so we
19324- * end up with proper pt_regs.
19325- */
19326-int kernel_execve(const char *filename,
19327- const char *const argv[],
19328- const char *const envp[])
19329+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19330 {
19331- long __res;
19332- asm volatile ("int $0x80"
19333- : "=a" (__res)
19334- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19335- return __res;
19336+ unsigned long pax_task_size = TASK_SIZE;
19337+
19338+#ifdef CONFIG_PAX_SEGMEXEC
19339+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19340+ pax_task_size = SEGMEXEC_TASK_SIZE;
19341+#endif
19342+
19343+ if (len > pax_task_size || addr > pax_task_size - len)
19344+ return -EINVAL;
19345+
19346+ return 0;
19347+}
19348+
19349+unsigned long
19350+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19351+ unsigned long len, unsigned long pgoff, unsigned long flags)
19352+{
19353+ struct mm_struct *mm = current->mm;
19354+ struct vm_area_struct *vma;
19355+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19356+
19357+#ifdef CONFIG_PAX_SEGMEXEC
19358+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19359+ pax_task_size = SEGMEXEC_TASK_SIZE;
19360+#endif
19361+
19362+ pax_task_size -= PAGE_SIZE;
19363+
19364+ if (len > pax_task_size)
19365+ return -ENOMEM;
19366+
19367+ if (flags & MAP_FIXED)
19368+ return addr;
19369+
19370+#ifdef CONFIG_PAX_RANDMMAP
19371+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19372+#endif
19373+
19374+ if (addr) {
19375+ addr = PAGE_ALIGN(addr);
19376+ if (pax_task_size - len >= addr) {
19377+ vma = find_vma(mm, addr);
19378+ if (check_heap_stack_gap(vma, addr, len))
19379+ return addr;
19380+ }
19381+ }
19382+ if (len > mm->cached_hole_size) {
19383+ start_addr = addr = mm->free_area_cache;
19384+ } else {
19385+ start_addr = addr = mm->mmap_base;
19386+ mm->cached_hole_size = 0;
19387+ }
19388+
19389+#ifdef CONFIG_PAX_PAGEEXEC
19390+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19391+ start_addr = 0x00110000UL;
19392+
19393+#ifdef CONFIG_PAX_RANDMMAP
19394+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19395+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19396+#endif
19397+
19398+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19399+ start_addr = addr = mm->mmap_base;
19400+ else
19401+ addr = start_addr;
19402+ }
19403+#endif
19404+
19405+full_search:
19406+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19407+ /* At this point: (!vma || addr < vma->vm_end). */
19408+ if (pax_task_size - len < addr) {
19409+ /*
19410+ * Start a new search - just in case we missed
19411+ * some holes.
19412+ */
19413+ if (start_addr != mm->mmap_base) {
19414+ start_addr = addr = mm->mmap_base;
19415+ mm->cached_hole_size = 0;
19416+ goto full_search;
19417+ }
19418+ return -ENOMEM;
19419+ }
19420+ if (check_heap_stack_gap(vma, addr, len))
19421+ break;
19422+ if (addr + mm->cached_hole_size < vma->vm_start)
19423+ mm->cached_hole_size = vma->vm_start - addr;
19424+ addr = vma->vm_end;
19425+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19426+ start_addr = addr = mm->mmap_base;
19427+ mm->cached_hole_size = 0;
19428+ goto full_search;
19429+ }
19430+ }
19431+
19432+ /*
19433+ * Remember the place where we stopped the search:
19434+ */
19435+ mm->free_area_cache = addr + len;
19436+ return addr;
19437+}
19438+
19439+unsigned long
19440+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19441+ const unsigned long len, const unsigned long pgoff,
19442+ const unsigned long flags)
19443+{
19444+ struct vm_area_struct *vma;
19445+ struct mm_struct *mm = current->mm;
19446+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19447+
19448+#ifdef CONFIG_PAX_SEGMEXEC
19449+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19450+ pax_task_size = SEGMEXEC_TASK_SIZE;
19451+#endif
19452+
19453+ pax_task_size -= PAGE_SIZE;
19454+
19455+ /* requested length too big for entire address space */
19456+ if (len > pax_task_size)
19457+ return -ENOMEM;
19458+
19459+ if (flags & MAP_FIXED)
19460+ return addr;
19461+
19462+#ifdef CONFIG_PAX_PAGEEXEC
19463+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19464+ goto bottomup;
19465+#endif
19466+
19467+#ifdef CONFIG_PAX_RANDMMAP
19468+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19469+#endif
19470+
19471+ /* requesting a specific address */
19472+ if (addr) {
19473+ addr = PAGE_ALIGN(addr);
19474+ if (pax_task_size - len >= addr) {
19475+ vma = find_vma(mm, addr);
19476+ if (check_heap_stack_gap(vma, addr, len))
19477+ return addr;
19478+ }
19479+ }
19480+
19481+ /* check if free_area_cache is useful for us */
19482+ if (len <= mm->cached_hole_size) {
19483+ mm->cached_hole_size = 0;
19484+ mm->free_area_cache = mm->mmap_base;
19485+ }
19486+
19487+ /* either no address requested or can't fit in requested address hole */
19488+ addr = mm->free_area_cache;
19489+
19490+ /* make sure it can fit in the remaining address space */
19491+ if (addr > len) {
19492+ vma = find_vma(mm, addr-len);
19493+ if (check_heap_stack_gap(vma, addr - len, len))
19494+ /* remember the address as a hint for next time */
19495+ return (mm->free_area_cache = addr-len);
19496+ }
19497+
19498+ if (mm->mmap_base < len)
19499+ goto bottomup;
19500+
19501+ addr = mm->mmap_base-len;
19502+
19503+ do {
19504+ /*
19505+ * Lookup failure means no vma is above this address,
19506+ * else if new region fits below vma->vm_start,
19507+ * return with success:
19508+ */
19509+ vma = find_vma(mm, addr);
19510+ if (check_heap_stack_gap(vma, addr, len))
19511+ /* remember the address as a hint for next time */
19512+ return (mm->free_area_cache = addr);
19513+
19514+ /* remember the largest hole we saw so far */
19515+ if (addr + mm->cached_hole_size < vma->vm_start)
19516+ mm->cached_hole_size = vma->vm_start - addr;
19517+
19518+ /* try just below the current vma->vm_start */
19519+ addr = skip_heap_stack_gap(vma, len);
19520+ } while (!IS_ERR_VALUE(addr));
19521+
19522+bottomup:
19523+ /*
19524+ * A failed mmap() very likely causes application failure,
19525+ * so fall back to the bottom-up function here. This scenario
19526+ * can happen with large stack limits and large mmap()
19527+ * allocations.
19528+ */
19529+
19530+#ifdef CONFIG_PAX_SEGMEXEC
19531+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19532+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19533+ else
19534+#endif
19535+
19536+ mm->mmap_base = TASK_UNMAPPED_BASE;
19537+
19538+#ifdef CONFIG_PAX_RANDMMAP
19539+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19540+ mm->mmap_base += mm->delta_mmap;
19541+#endif
19542+
19543+ mm->free_area_cache = mm->mmap_base;
19544+ mm->cached_hole_size = ~0UL;
19545+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19546+ /*
19547+ * Restore the topdown base:
19548+ */
19549+ mm->mmap_base = base;
19550+ mm->free_area_cache = base;
19551+ mm->cached_hole_size = ~0UL;
19552+
19553+ return addr;
19554 }
19555diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19556index b4d3c39..82bb73b 100644
19557--- a/arch/x86/kernel/sys_x86_64.c
19558+++ b/arch/x86/kernel/sys_x86_64.c
19559@@ -95,8 +95,8 @@ out:
19560 return error;
19561 }
19562
19563-static void find_start_end(unsigned long flags, unsigned long *begin,
19564- unsigned long *end)
19565+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19566+ unsigned long *begin, unsigned long *end)
19567 {
19568 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19569 unsigned long new_begin;
19570@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19571 *begin = new_begin;
19572 }
19573 } else {
19574- *begin = TASK_UNMAPPED_BASE;
19575+ *begin = mm->mmap_base;
19576 *end = TASK_SIZE;
19577 }
19578 }
19579@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19580 if (flags & MAP_FIXED)
19581 return addr;
19582
19583- find_start_end(flags, &begin, &end);
19584+ find_start_end(mm, flags, &begin, &end);
19585
19586 if (len > end)
19587 return -ENOMEM;
19588
19589+#ifdef CONFIG_PAX_RANDMMAP
19590+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19591+#endif
19592+
19593 if (addr) {
19594 addr = PAGE_ALIGN(addr);
19595 vma = find_vma(mm, addr);
19596- if (end - len >= addr &&
19597- (!vma || addr + len <= vma->vm_start))
19598+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19599 return addr;
19600 }
19601 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19602@@ -172,7 +175,7 @@ full_search:
19603 }
19604 return -ENOMEM;
19605 }
19606- if (!vma || addr + len <= vma->vm_start) {
19607+ if (check_heap_stack_gap(vma, addr, len)) {
19608 /*
19609 * Remember the place where we stopped the search:
19610 */
19611@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19612 {
19613 struct vm_area_struct *vma;
19614 struct mm_struct *mm = current->mm;
19615- unsigned long addr = addr0, start_addr;
19616+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19617
19618 /* requested length too big for entire address space */
19619 if (len > TASK_SIZE)
19620@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19621 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19622 goto bottomup;
19623
19624+#ifdef CONFIG_PAX_RANDMMAP
19625+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19626+#endif
19627+
19628 /* requesting a specific address */
19629 if (addr) {
19630 addr = PAGE_ALIGN(addr);
19631- vma = find_vma(mm, addr);
19632- if (TASK_SIZE - len >= addr &&
19633- (!vma || addr + len <= vma->vm_start))
19634- return addr;
19635+ if (TASK_SIZE - len >= addr) {
19636+ vma = find_vma(mm, addr);
19637+ if (check_heap_stack_gap(vma, addr, len))
19638+ return addr;
19639+ }
19640 }
19641
19642 /* check if free_area_cache is useful for us */
19643@@ -240,7 +248,7 @@ try_again:
19644 * return with success:
19645 */
19646 vma = find_vma(mm, addr);
19647- if (!vma || addr+len <= vma->vm_start)
19648+ if (check_heap_stack_gap(vma, addr, len))
19649 /* remember the address as a hint for next time */
19650 return mm->free_area_cache = addr;
19651
19652@@ -249,8 +257,8 @@ try_again:
19653 mm->cached_hole_size = vma->vm_start - addr;
19654
19655 /* try just below the current vma->vm_start */
19656- addr = vma->vm_start-len;
19657- } while (len < vma->vm_start);
19658+ addr = skip_heap_stack_gap(vma, len);
19659+ } while (!IS_ERR_VALUE(addr));
19660
19661 fail:
19662 /*
19663@@ -270,13 +278,21 @@ bottomup:
19664 * can happen with large stack limits and large mmap()
19665 * allocations.
19666 */
19667+ mm->mmap_base = TASK_UNMAPPED_BASE;
19668+
19669+#ifdef CONFIG_PAX_RANDMMAP
19670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19671+ mm->mmap_base += mm->delta_mmap;
19672+#endif
19673+
19674+ mm->free_area_cache = mm->mmap_base;
19675 mm->cached_hole_size = ~0UL;
19676- mm->free_area_cache = TASK_UNMAPPED_BASE;
19677 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19678 /*
19679 * Restore the topdown base:
19680 */
19681- mm->free_area_cache = mm->mmap_base;
19682+ mm->mmap_base = base;
19683+ mm->free_area_cache = base;
19684 mm->cached_hole_size = ~0UL;
19685
19686 return addr;
19687diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19688index 6410744..79758f0 100644
19689--- a/arch/x86/kernel/tboot.c
19690+++ b/arch/x86/kernel/tboot.c
19691@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19692
19693 void tboot_shutdown(u32 shutdown_type)
19694 {
19695- void (*shutdown)(void);
19696+ void (* __noreturn shutdown)(void);
19697
19698 if (!tboot_enabled())
19699 return;
19700@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19701
19702 switch_to_tboot_pt();
19703
19704- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19705+ shutdown = (void *)tboot->shutdown_entry;
19706 shutdown();
19707
19708 /* should not reach here */
19709@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19710 return 0;
19711 }
19712
19713-static atomic_t ap_wfs_count;
19714+static atomic_unchecked_t ap_wfs_count;
19715
19716 static int tboot_wait_for_aps(int num_aps)
19717 {
19718@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19719 {
19720 switch (action) {
19721 case CPU_DYING:
19722- atomic_inc(&ap_wfs_count);
19723+ atomic_inc_unchecked(&ap_wfs_count);
19724 if (num_online_cpus() == 1)
19725- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19726+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19727 return NOTIFY_BAD;
19728 break;
19729 }
19730@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19731
19732 tboot_create_trampoline();
19733
19734- atomic_set(&ap_wfs_count, 0);
19735+ atomic_set_unchecked(&ap_wfs_count, 0);
19736 register_hotcpu_notifier(&tboot_cpu_notifier);
19737
19738 acpi_os_set_prepare_sleep(&tboot_sleep);
19739diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19740index c6eba2b..3303326 100644
19741--- a/arch/x86/kernel/time.c
19742+++ b/arch/x86/kernel/time.c
19743@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19744 {
19745 unsigned long pc = instruction_pointer(regs);
19746
19747- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19748+ if (!user_mode(regs) && in_lock_functions(pc)) {
19749 #ifdef CONFIG_FRAME_POINTER
19750- return *(unsigned long *)(regs->bp + sizeof(long));
19751+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19752 #else
19753 unsigned long *sp =
19754 (unsigned long *)kernel_stack_pointer(regs);
19755@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19756 * or above a saved flags. Eflags has bits 22-31 zero,
19757 * kernel addresses don't.
19758 */
19759+
19760+#ifdef CONFIG_PAX_KERNEXEC
19761+ return ktla_ktva(sp[0]);
19762+#else
19763 if (sp[0] >> 22)
19764 return sp[0];
19765 if (sp[1] >> 22)
19766 return sp[1];
19767 #endif
19768+
19769+#endif
19770 }
19771 return pc;
19772 }
19773diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19774index 9d9d2f9..ed344e4 100644
19775--- a/arch/x86/kernel/tls.c
19776+++ b/arch/x86/kernel/tls.c
19777@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19778 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19779 return -EINVAL;
19780
19781+#ifdef CONFIG_PAX_SEGMEXEC
19782+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19783+ return -EINVAL;
19784+#endif
19785+
19786 set_tls_desc(p, idx, &info, 1);
19787
19788 return 0;
19789diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19790index 451c0a7..e57f551 100644
19791--- a/arch/x86/kernel/trampoline_32.S
19792+++ b/arch/x86/kernel/trampoline_32.S
19793@@ -32,6 +32,12 @@
19794 #include <asm/segment.h>
19795 #include <asm/page_types.h>
19796
19797+#ifdef CONFIG_PAX_KERNEXEC
19798+#define ta(X) (X)
19799+#else
19800+#define ta(X) ((X) - __PAGE_OFFSET)
19801+#endif
19802+
19803 #ifdef CONFIG_SMP
19804
19805 .section ".x86_trampoline","a"
19806@@ -62,7 +68,7 @@ r_base = .
19807 inc %ax # protected mode (PE) bit
19808 lmsw %ax # into protected mode
19809 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19810- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19811+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19812
19813 # These need to be in the same 64K segment as the above;
19814 # hence we don't use the boot_gdt_descr defined in head.S
19815diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19816index 09ff517..df19fbff 100644
19817--- a/arch/x86/kernel/trampoline_64.S
19818+++ b/arch/x86/kernel/trampoline_64.S
19819@@ -90,7 +90,7 @@ startup_32:
19820 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19821 movl %eax, %ds
19822
19823- movl $X86_CR4_PAE, %eax
19824+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19825 movl %eax, %cr4 # Enable PAE mode
19826
19827 # Setup trampoline 4 level pagetables
19828@@ -138,7 +138,7 @@ tidt:
19829 # so the kernel can live anywhere
19830 .balign 4
19831 tgdt:
19832- .short tgdt_end - tgdt # gdt limit
19833+ .short tgdt_end - tgdt - 1 # gdt limit
19834 .long tgdt - r_base
19835 .short 0
19836 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19837diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19838index ff9281f1..30cb4ac 100644
19839--- a/arch/x86/kernel/traps.c
19840+++ b/arch/x86/kernel/traps.c
19841@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19842
19843 /* Do we ignore FPU interrupts ? */
19844 char ignore_fpu_irq;
19845-
19846-/*
19847- * The IDT has to be page-aligned to simplify the Pentium
19848- * F0 0F bug workaround.
19849- */
19850-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19851 #endif
19852
19853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19854@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19855 }
19856
19857 static void __kprobes
19858-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19859+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19860 long error_code, siginfo_t *info)
19861 {
19862 struct task_struct *tsk = current;
19863
19864 #ifdef CONFIG_X86_32
19865- if (regs->flags & X86_VM_MASK) {
19866+ if (v8086_mode(regs)) {
19867 /*
19868 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19869 * On nmi (interrupt 2), do_trap should not be called.
19870@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19871 }
19872 #endif
19873
19874- if (!user_mode(regs))
19875+ if (!user_mode_novm(regs))
19876 goto kernel_trap;
19877
19878 #ifdef CONFIG_X86_32
19879@@ -148,7 +142,7 @@ trap_signal:
19880 printk_ratelimit()) {
19881 printk(KERN_INFO
19882 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19883- tsk->comm, tsk->pid, str,
19884+ tsk->comm, task_pid_nr(tsk), str,
19885 regs->ip, regs->sp, error_code);
19886 print_vma_addr(" in ", regs->ip);
19887 printk("\n");
19888@@ -165,8 +159,20 @@ kernel_trap:
19889 if (!fixup_exception(regs)) {
19890 tsk->thread.error_code = error_code;
19891 tsk->thread.trap_nr = trapnr;
19892+
19893+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19894+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19895+ str = "PAX: suspicious stack segment fault";
19896+#endif
19897+
19898 die(str, regs, error_code);
19899 }
19900+
19901+#ifdef CONFIG_PAX_REFCOUNT
19902+ if (trapnr == 4)
19903+ pax_report_refcount_overflow(regs);
19904+#endif
19905+
19906 return;
19907
19908 #ifdef CONFIG_X86_32
19909@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19910 conditional_sti(regs);
19911
19912 #ifdef CONFIG_X86_32
19913- if (regs->flags & X86_VM_MASK)
19914+ if (v8086_mode(regs))
19915 goto gp_in_vm86;
19916 #endif
19917
19918 tsk = current;
19919- if (!user_mode(regs))
19920+ if (!user_mode_novm(regs))
19921 goto gp_in_kernel;
19922
19923+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19924+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19925+ struct mm_struct *mm = tsk->mm;
19926+ unsigned long limit;
19927+
19928+ down_write(&mm->mmap_sem);
19929+ limit = mm->context.user_cs_limit;
19930+ if (limit < TASK_SIZE) {
19931+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19932+ up_write(&mm->mmap_sem);
19933+ return;
19934+ }
19935+ up_write(&mm->mmap_sem);
19936+ }
19937+#endif
19938+
19939 tsk->thread.error_code = error_code;
19940 tsk->thread.trap_nr = X86_TRAP_GP;
19941
19942@@ -299,6 +321,13 @@ gp_in_kernel:
19943 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19944 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19945 return;
19946+
19947+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19948+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19949+ die("PAX: suspicious general protection fault", regs, error_code);
19950+ else
19951+#endif
19952+
19953 die("general protection fault", regs, error_code);
19954 }
19955
19956@@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19957 /* It's safe to allow irq's after DR6 has been saved */
19958 preempt_conditional_sti(regs);
19959
19960- if (regs->flags & X86_VM_MASK) {
19961+ if (v8086_mode(regs)) {
19962 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19963 X86_TRAP_DB);
19964 preempt_conditional_cli(regs);
19965@@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19966 * We already checked v86 mode above, so we can check for kernel mode
19967 * by just checking the CPL of CS.
19968 */
19969- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19970+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19971 tsk->thread.debugreg6 &= ~DR_STEP;
19972 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19973 regs->flags &= ~X86_EFLAGS_TF;
19974@@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19975 return;
19976 conditional_sti(regs);
19977
19978- if (!user_mode_vm(regs))
19979+ if (!user_mode(regs))
19980 {
19981 if (!fixup_exception(regs)) {
19982 task->thread.error_code = error_code;
19983diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19984index b9242ba..50c5edd 100644
19985--- a/arch/x86/kernel/verify_cpu.S
19986+++ b/arch/x86/kernel/verify_cpu.S
19987@@ -20,6 +20,7 @@
19988 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19989 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19990 * arch/x86/kernel/head_32.S: processor startup
19991+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19992 *
19993 * verify_cpu, returns the status of longmode and SSE in register %eax.
19994 * 0: Success 1: Failure
19995diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19996index 255f58a..5e91150 100644
19997--- a/arch/x86/kernel/vm86_32.c
19998+++ b/arch/x86/kernel/vm86_32.c
19999@@ -41,6 +41,7 @@
20000 #include <linux/ptrace.h>
20001 #include <linux/audit.h>
20002 #include <linux/stddef.h>
20003+#include <linux/grsecurity.h>
20004
20005 #include <asm/uaccess.h>
20006 #include <asm/io.h>
20007@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20008 do_exit(SIGSEGV);
20009 }
20010
20011- tss = &per_cpu(init_tss, get_cpu());
20012+ tss = init_tss + get_cpu();
20013 current->thread.sp0 = current->thread.saved_sp0;
20014 current->thread.sysenter_cs = __KERNEL_CS;
20015 load_sp0(tss, &current->thread);
20016@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
20017 struct task_struct *tsk;
20018 int tmp, ret = -EPERM;
20019
20020+#ifdef CONFIG_GRKERNSEC_VM86
20021+ if (!capable(CAP_SYS_RAWIO)) {
20022+ gr_handle_vm86();
20023+ goto out;
20024+ }
20025+#endif
20026+
20027 tsk = current;
20028 if (tsk->thread.saved_sp0)
20029 goto out;
20030@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20031 int tmp, ret;
20032 struct vm86plus_struct __user *v86;
20033
20034+#ifdef CONFIG_GRKERNSEC_VM86
20035+ if (!capable(CAP_SYS_RAWIO)) {
20036+ gr_handle_vm86();
20037+ ret = -EPERM;
20038+ goto out;
20039+ }
20040+#endif
20041+
20042 tsk = current;
20043 switch (cmd) {
20044 case VM86_REQUEST_IRQ:
20045@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20046 tsk->thread.saved_fs = info->regs32->fs;
20047 tsk->thread.saved_gs = get_user_gs(info->regs32);
20048
20049- tss = &per_cpu(init_tss, get_cpu());
20050+ tss = init_tss + get_cpu();
20051 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20052 if (cpu_has_sep)
20053 tsk->thread.sysenter_cs = 0;
20054@@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20055 goto cannot_handle;
20056 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20057 goto cannot_handle;
20058- intr_ptr = (unsigned long __user *) (i << 2);
20059+ intr_ptr = (__force unsigned long __user *) (i << 2);
20060 if (get_user(segoffs, intr_ptr))
20061 goto cannot_handle;
20062 if ((segoffs >> 16) == BIOSSEG)
20063diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20064index 0f703f1..9e15f64 100644
20065--- a/arch/x86/kernel/vmlinux.lds.S
20066+++ b/arch/x86/kernel/vmlinux.lds.S
20067@@ -26,6 +26,13 @@
20068 #include <asm/page_types.h>
20069 #include <asm/cache.h>
20070 #include <asm/boot.h>
20071+#include <asm/segment.h>
20072+
20073+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20074+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20075+#else
20076+#define __KERNEL_TEXT_OFFSET 0
20077+#endif
20078
20079 #undef i386 /* in case the preprocessor is a 32bit one */
20080
20081@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20082
20083 PHDRS {
20084 text PT_LOAD FLAGS(5); /* R_E */
20085+#ifdef CONFIG_X86_32
20086+ module PT_LOAD FLAGS(5); /* R_E */
20087+#endif
20088+#ifdef CONFIG_XEN
20089+ rodata PT_LOAD FLAGS(5); /* R_E */
20090+#else
20091+ rodata PT_LOAD FLAGS(4); /* R__ */
20092+#endif
20093 data PT_LOAD FLAGS(6); /* RW_ */
20094-#ifdef CONFIG_X86_64
20095+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20096 #ifdef CONFIG_SMP
20097 percpu PT_LOAD FLAGS(6); /* RW_ */
20098 #endif
20099+ text.init PT_LOAD FLAGS(5); /* R_E */
20100+ text.exit PT_LOAD FLAGS(5); /* R_E */
20101 init PT_LOAD FLAGS(7); /* RWE */
20102-#endif
20103 note PT_NOTE FLAGS(0); /* ___ */
20104 }
20105
20106 SECTIONS
20107 {
20108 #ifdef CONFIG_X86_32
20109- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20110- phys_startup_32 = startup_32 - LOAD_OFFSET;
20111+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20112 #else
20113- . = __START_KERNEL;
20114- phys_startup_64 = startup_64 - LOAD_OFFSET;
20115+ . = __START_KERNEL;
20116 #endif
20117
20118 /* Text and read-only data */
20119- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20120- _text = .;
20121+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20122 /* bootstrapping code */
20123+#ifdef CONFIG_X86_32
20124+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20125+#else
20126+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20127+#endif
20128+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20129+ _text = .;
20130 HEAD_TEXT
20131 #ifdef CONFIG_X86_32
20132 . = ALIGN(PAGE_SIZE);
20133@@ -108,13 +128,47 @@ SECTIONS
20134 IRQENTRY_TEXT
20135 *(.fixup)
20136 *(.gnu.warning)
20137- /* End of text section */
20138- _etext = .;
20139 } :text = 0x9090
20140
20141- NOTES :text :note
20142+ . += __KERNEL_TEXT_OFFSET;
20143
20144- EXCEPTION_TABLE(16) :text = 0x9090
20145+#ifdef CONFIG_X86_32
20146+ . = ALIGN(PAGE_SIZE);
20147+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20148+
20149+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20150+ MODULES_EXEC_VADDR = .;
20151+ BYTE(0)
20152+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20153+ . = ALIGN(HPAGE_SIZE);
20154+ MODULES_EXEC_END = . - 1;
20155+#endif
20156+
20157+ } :module
20158+#endif
20159+
20160+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20161+ /* End of text section */
20162+ _etext = . - __KERNEL_TEXT_OFFSET;
20163+ }
20164+
20165+#ifdef CONFIG_X86_32
20166+ . = ALIGN(PAGE_SIZE);
20167+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20168+ *(.idt)
20169+ . = ALIGN(PAGE_SIZE);
20170+ *(.empty_zero_page)
20171+ *(.initial_pg_fixmap)
20172+ *(.initial_pg_pmd)
20173+ *(.initial_page_table)
20174+ *(.swapper_pg_dir)
20175+ } :rodata
20176+#endif
20177+
20178+ . = ALIGN(PAGE_SIZE);
20179+ NOTES :rodata :note
20180+
20181+ EXCEPTION_TABLE(16) :rodata
20182
20183 #if defined(CONFIG_DEBUG_RODATA)
20184 /* .text should occupy whole number of pages */
20185@@ -126,16 +180,20 @@ SECTIONS
20186
20187 /* Data */
20188 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20189+
20190+#ifdef CONFIG_PAX_KERNEXEC
20191+ . = ALIGN(HPAGE_SIZE);
20192+#else
20193+ . = ALIGN(PAGE_SIZE);
20194+#endif
20195+
20196 /* Start of data section */
20197 _sdata = .;
20198
20199 /* init_task */
20200 INIT_TASK_DATA(THREAD_SIZE)
20201
20202-#ifdef CONFIG_X86_32
20203- /* 32 bit has nosave before _edata */
20204 NOSAVE_DATA
20205-#endif
20206
20207 PAGE_ALIGNED_DATA(PAGE_SIZE)
20208
20209@@ -176,12 +234,19 @@ SECTIONS
20210 #endif /* CONFIG_X86_64 */
20211
20212 /* Init code and data - will be freed after init */
20213- . = ALIGN(PAGE_SIZE);
20214 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20215+ BYTE(0)
20216+
20217+#ifdef CONFIG_PAX_KERNEXEC
20218+ . = ALIGN(HPAGE_SIZE);
20219+#else
20220+ . = ALIGN(PAGE_SIZE);
20221+#endif
20222+
20223 __init_begin = .; /* paired with __init_end */
20224- }
20225+ } :init.begin
20226
20227-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20228+#ifdef CONFIG_SMP
20229 /*
20230 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20231 * output PHDR, so the next output section - .init.text - should
20232@@ -190,12 +255,27 @@ SECTIONS
20233 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20234 #endif
20235
20236- INIT_TEXT_SECTION(PAGE_SIZE)
20237-#ifdef CONFIG_X86_64
20238- :init
20239-#endif
20240+ . = ALIGN(PAGE_SIZE);
20241+ init_begin = .;
20242+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20243+ VMLINUX_SYMBOL(_sinittext) = .;
20244+ INIT_TEXT
20245+ VMLINUX_SYMBOL(_einittext) = .;
20246+ . = ALIGN(PAGE_SIZE);
20247+ } :text.init
20248
20249- INIT_DATA_SECTION(16)
20250+ /*
20251+ * .exit.text is discard at runtime, not link time, to deal with
20252+ * references from .altinstructions and .eh_frame
20253+ */
20254+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20255+ EXIT_TEXT
20256+ . = ALIGN(16);
20257+ } :text.exit
20258+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20259+
20260+ . = ALIGN(PAGE_SIZE);
20261+ INIT_DATA_SECTION(16) :init
20262
20263 /*
20264 * Code and data for a variety of lowlevel trampolines, to be
20265@@ -269,19 +349,12 @@ SECTIONS
20266 }
20267
20268 . = ALIGN(8);
20269- /*
20270- * .exit.text is discard at runtime, not link time, to deal with
20271- * references from .altinstructions and .eh_frame
20272- */
20273- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20274- EXIT_TEXT
20275- }
20276
20277 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20278 EXIT_DATA
20279 }
20280
20281-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20282+#ifndef CONFIG_SMP
20283 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20284 #endif
20285
20286@@ -300,16 +373,10 @@ SECTIONS
20287 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20288 __smp_locks = .;
20289 *(.smp_locks)
20290- . = ALIGN(PAGE_SIZE);
20291 __smp_locks_end = .;
20292+ . = ALIGN(PAGE_SIZE);
20293 }
20294
20295-#ifdef CONFIG_X86_64
20296- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20297- NOSAVE_DATA
20298- }
20299-#endif
20300-
20301 /* BSS */
20302 . = ALIGN(PAGE_SIZE);
20303 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20304@@ -325,6 +392,7 @@ SECTIONS
20305 __brk_base = .;
20306 . += 64 * 1024; /* 64k alignment slop space */
20307 *(.brk_reservation) /* areas brk users have reserved */
20308+ . = ALIGN(HPAGE_SIZE);
20309 __brk_limit = .;
20310 }
20311
20312@@ -351,13 +419,12 @@ SECTIONS
20313 * for the boot processor.
20314 */
20315 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20316-INIT_PER_CPU(gdt_page);
20317 INIT_PER_CPU(irq_stack_union);
20318
20319 /*
20320 * Build-time check on the image size:
20321 */
20322-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20323+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20324 "kernel image bigger than KERNEL_IMAGE_SIZE");
20325
20326 #ifdef CONFIG_SMP
20327diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20328index 7515cf0..331a1a0 100644
20329--- a/arch/x86/kernel/vsyscall_64.c
20330+++ b/arch/x86/kernel/vsyscall_64.c
20331@@ -54,15 +54,13 @@
20332 DEFINE_VVAR(int, vgetcpu_mode);
20333 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20334
20335-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20336+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20337
20338 static int __init vsyscall_setup(char *str)
20339 {
20340 if (str) {
20341 if (!strcmp("emulate", str))
20342 vsyscall_mode = EMULATE;
20343- else if (!strcmp("native", str))
20344- vsyscall_mode = NATIVE;
20345 else if (!strcmp("none", str))
20346 vsyscall_mode = NONE;
20347 else
20348@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20349
20350 tsk = current;
20351 if (seccomp_mode(&tsk->seccomp))
20352- do_exit(SIGKILL);
20353+ do_group_exit(SIGKILL);
20354
20355 /*
20356 * With a real vsyscall, page faults cause SIGSEGV. We want to
20357@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20358 return true;
20359
20360 sigsegv:
20361- force_sig(SIGSEGV, current);
20362- return true;
20363+ do_group_exit(SIGKILL);
20364 }
20365
20366 /*
20367@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20368 extern char __vvar_page;
20369 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20370
20371- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20372- vsyscall_mode == NATIVE
20373- ? PAGE_KERNEL_VSYSCALL
20374- : PAGE_KERNEL_VVAR);
20375+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20376 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20377 (unsigned long)VSYSCALL_START);
20378
20379diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20380index 9796c2f..f686fbf 100644
20381--- a/arch/x86/kernel/x8664_ksyms_64.c
20382+++ b/arch/x86/kernel/x8664_ksyms_64.c
20383@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20384 EXPORT_SYMBOL(copy_user_generic_string);
20385 EXPORT_SYMBOL(copy_user_generic_unrolled);
20386 EXPORT_SYMBOL(__copy_user_nocache);
20387-EXPORT_SYMBOL(_copy_from_user);
20388-EXPORT_SYMBOL(_copy_to_user);
20389
20390 EXPORT_SYMBOL(copy_page);
20391 EXPORT_SYMBOL(clear_page);
20392diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20393index e62728e..5fc3a07 100644
20394--- a/arch/x86/kernel/xsave.c
20395+++ b/arch/x86/kernel/xsave.c
20396@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20397 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20398 return -EINVAL;
20399
20400- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20401+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20402 fx_sw_user->extended_size -
20403 FP_XSTATE_MAGIC2_SIZE));
20404 if (err)
20405@@ -267,7 +267,7 @@ fx_only:
20406 * the other extended state.
20407 */
20408 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20409- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20410+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20411 }
20412
20413 /*
20414@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20415 if (use_xsave())
20416 err = restore_user_xstate(buf);
20417 else
20418- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20419+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20420 buf);
20421 if (unlikely(err)) {
20422 /*
20423diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20424index 9fed5be..18fd595 100644
20425--- a/arch/x86/kvm/cpuid.c
20426+++ b/arch/x86/kvm/cpuid.c
20427@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20428 struct kvm_cpuid2 *cpuid,
20429 struct kvm_cpuid_entry2 __user *entries)
20430 {
20431- int r;
20432+ int r, i;
20433
20434 r = -E2BIG;
20435 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20436 goto out;
20437 r = -EFAULT;
20438- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20439- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20440+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20441 goto out;
20442+ for (i = 0; i < cpuid->nent; ++i) {
20443+ struct kvm_cpuid_entry2 cpuid_entry;
20444+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20445+ goto out;
20446+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20447+ }
20448 vcpu->arch.cpuid_nent = cpuid->nent;
20449 kvm_apic_set_version(vcpu);
20450 kvm_x86_ops->cpuid_update(vcpu);
20451@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20452 struct kvm_cpuid2 *cpuid,
20453 struct kvm_cpuid_entry2 __user *entries)
20454 {
20455- int r;
20456+ int r, i;
20457
20458 r = -E2BIG;
20459 if (cpuid->nent < vcpu->arch.cpuid_nent)
20460 goto out;
20461 r = -EFAULT;
20462- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20463- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20464+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20465 goto out;
20466+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20467+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20468+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20469+ goto out;
20470+ }
20471 return 0;
20472
20473 out:
20474diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20475index 8375622..b7bca1a 100644
20476--- a/arch/x86/kvm/emulate.c
20477+++ b/arch/x86/kvm/emulate.c
20478@@ -252,6 +252,7 @@ struct gprefix {
20479
20480 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20481 do { \
20482+ unsigned long _tmp; \
20483 __asm__ __volatile__ ( \
20484 _PRE_EFLAGS("0", "4", "2") \
20485 _op _suffix " %"_x"3,%1; " \
20486@@ -266,8 +267,6 @@ struct gprefix {
20487 /* Raw emulation: instruction has two explicit operands. */
20488 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20489 do { \
20490- unsigned long _tmp; \
20491- \
20492 switch ((ctxt)->dst.bytes) { \
20493 case 2: \
20494 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20495@@ -283,7 +282,6 @@ struct gprefix {
20496
20497 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20498 do { \
20499- unsigned long _tmp; \
20500 switch ((ctxt)->dst.bytes) { \
20501 case 1: \
20502 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20503diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20504index 8584322..17d5955 100644
20505--- a/arch/x86/kvm/lapic.c
20506+++ b/arch/x86/kvm/lapic.c
20507@@ -54,7 +54,7 @@
20508 #define APIC_BUS_CYCLE_NS 1
20509
20510 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20511-#define apic_debug(fmt, arg...)
20512+#define apic_debug(fmt, arg...) do {} while (0)
20513
20514 #define APIC_LVT_NUM 6
20515 /* 14 is the version for Xeon and Pentium 8.4.8*/
20516diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20517index df5a703..63748a7 100644
20518--- a/arch/x86/kvm/paging_tmpl.h
20519+++ b/arch/x86/kvm/paging_tmpl.h
20520@@ -197,7 +197,7 @@ retry_walk:
20521 if (unlikely(kvm_is_error_hva(host_addr)))
20522 goto error;
20523
20524- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20525+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20526 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20527 goto error;
20528
20529diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20530index e334389..6839087 100644
20531--- a/arch/x86/kvm/svm.c
20532+++ b/arch/x86/kvm/svm.c
20533@@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20534 int cpu = raw_smp_processor_id();
20535
20536 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20537+
20538+ pax_open_kernel();
20539 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20540+ pax_close_kernel();
20541+
20542 load_TR_desc();
20543 }
20544
20545@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20546 #endif
20547 #endif
20548
20549+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20550+ __set_fs(current_thread_info()->addr_limit);
20551+#endif
20552+
20553 reload_tss(vcpu);
20554
20555 local_irq_disable();
20556diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20557index 4ff0ab9..2ff68d3 100644
20558--- a/arch/x86/kvm/vmx.c
20559+++ b/arch/x86/kvm/vmx.c
20560@@ -1303,7 +1303,11 @@ static void reload_tss(void)
20561 struct desc_struct *descs;
20562
20563 descs = (void *)gdt->address;
20564+
20565+ pax_open_kernel();
20566 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20567+ pax_close_kernel();
20568+
20569 load_TR_desc();
20570 }
20571
20572@@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20573 if (!cpu_has_vmx_flexpriority())
20574 flexpriority_enabled = 0;
20575
20576- if (!cpu_has_vmx_tpr_shadow())
20577- kvm_x86_ops->update_cr8_intercept = NULL;
20578+ if (!cpu_has_vmx_tpr_shadow()) {
20579+ pax_open_kernel();
20580+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20581+ pax_close_kernel();
20582+ }
20583
20584 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20585 kvm_disable_largepages();
20586@@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20587 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20588
20589 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20590- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20591+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20592
20593 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20594 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20595@@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20596 "jmp .Lkvm_vmx_return \n\t"
20597 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20598 ".Lkvm_vmx_return: "
20599+
20600+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20601+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20602+ ".Lkvm_vmx_return2: "
20603+#endif
20604+
20605 /* Save guest registers, load host registers, keep flags */
20606 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20607 "pop %0 \n\t"
20608@@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20609 #endif
20610 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20611 [wordsize]"i"(sizeof(ulong))
20612+
20613+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20614+ ,[cs]"i"(__KERNEL_CS)
20615+#endif
20616+
20617 : "cc", "memory"
20618 , R"ax", R"bx", R"di", R"si"
20619 #ifdef CONFIG_X86_64
20620@@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20621 }
20622 }
20623
20624- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20625+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20626+
20627+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20628+ loadsegment(fs, __KERNEL_PERCPU);
20629+#endif
20630+
20631+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20632+ __set_fs(current_thread_info()->addr_limit);
20633+#endif
20634+
20635 vmx->loaded_vmcs->launched = 1;
20636
20637 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20638diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20639index 185a2b8..866d2a6 100644
20640--- a/arch/x86/kvm/x86.c
20641+++ b/arch/x86/kvm/x86.c
20642@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20643 {
20644 struct kvm *kvm = vcpu->kvm;
20645 int lm = is_long_mode(vcpu);
20646- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20647- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20648+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20649+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20650 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20651 : kvm->arch.xen_hvm_config.blob_size_32;
20652 u32 page_num = data & ~PAGE_MASK;
20653@@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20654 if (n < msr_list.nmsrs)
20655 goto out;
20656 r = -EFAULT;
20657+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20658+ goto out;
20659 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20660 num_msrs_to_save * sizeof(u32)))
20661 goto out;
20662@@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20663 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20664 struct kvm_interrupt *irq)
20665 {
20666- if (irq->irq < 0 || irq->irq >= 256)
20667+ if (irq->irq >= 256)
20668 return -EINVAL;
20669 if (irqchip_in_kernel(vcpu->kvm))
20670 return -ENXIO;
20671@@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20672 kvm_mmu_set_mmio_spte_mask(mask);
20673 }
20674
20675-int kvm_arch_init(void *opaque)
20676+int kvm_arch_init(const void *opaque)
20677 {
20678 int r;
20679 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20680diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20681index 642d880..44e0f3f 100644
20682--- a/arch/x86/lguest/boot.c
20683+++ b/arch/x86/lguest/boot.c
20684@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20685 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20686 * Launcher to reboot us.
20687 */
20688-static void lguest_restart(char *reason)
20689+static __noreturn void lguest_restart(char *reason)
20690 {
20691 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20692+ BUG();
20693 }
20694
20695 /*G:050
20696diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20697index 00933d5..3a64af9 100644
20698--- a/arch/x86/lib/atomic64_386_32.S
20699+++ b/arch/x86/lib/atomic64_386_32.S
20700@@ -48,6 +48,10 @@ BEGIN(read)
20701 movl (v), %eax
20702 movl 4(v), %edx
20703 RET_ENDP
20704+BEGIN(read_unchecked)
20705+ movl (v), %eax
20706+ movl 4(v), %edx
20707+RET_ENDP
20708 #undef v
20709
20710 #define v %esi
20711@@ -55,6 +59,10 @@ BEGIN(set)
20712 movl %ebx, (v)
20713 movl %ecx, 4(v)
20714 RET_ENDP
20715+BEGIN(set_unchecked)
20716+ movl %ebx, (v)
20717+ movl %ecx, 4(v)
20718+RET_ENDP
20719 #undef v
20720
20721 #define v %esi
20722@@ -70,6 +78,20 @@ RET_ENDP
20723 BEGIN(add)
20724 addl %eax, (v)
20725 adcl %edx, 4(v)
20726+
20727+#ifdef CONFIG_PAX_REFCOUNT
20728+ jno 0f
20729+ subl %eax, (v)
20730+ sbbl %edx, 4(v)
20731+ int $4
20732+0:
20733+ _ASM_EXTABLE(0b, 0b)
20734+#endif
20735+
20736+RET_ENDP
20737+BEGIN(add_unchecked)
20738+ addl %eax, (v)
20739+ adcl %edx, 4(v)
20740 RET_ENDP
20741 #undef v
20742
20743@@ -77,6 +99,24 @@ RET_ENDP
20744 BEGIN(add_return)
20745 addl (v), %eax
20746 adcl 4(v), %edx
20747+
20748+#ifdef CONFIG_PAX_REFCOUNT
20749+ into
20750+1234:
20751+ _ASM_EXTABLE(1234b, 2f)
20752+#endif
20753+
20754+ movl %eax, (v)
20755+ movl %edx, 4(v)
20756+
20757+#ifdef CONFIG_PAX_REFCOUNT
20758+2:
20759+#endif
20760+
20761+RET_ENDP
20762+BEGIN(add_return_unchecked)
20763+ addl (v), %eax
20764+ adcl 4(v), %edx
20765 movl %eax, (v)
20766 movl %edx, 4(v)
20767 RET_ENDP
20768@@ -86,6 +126,20 @@ RET_ENDP
20769 BEGIN(sub)
20770 subl %eax, (v)
20771 sbbl %edx, 4(v)
20772+
20773+#ifdef CONFIG_PAX_REFCOUNT
20774+ jno 0f
20775+ addl %eax, (v)
20776+ adcl %edx, 4(v)
20777+ int $4
20778+0:
20779+ _ASM_EXTABLE(0b, 0b)
20780+#endif
20781+
20782+RET_ENDP
20783+BEGIN(sub_unchecked)
20784+ subl %eax, (v)
20785+ sbbl %edx, 4(v)
20786 RET_ENDP
20787 #undef v
20788
20789@@ -96,6 +150,27 @@ BEGIN(sub_return)
20790 sbbl $0, %edx
20791 addl (v), %eax
20792 adcl 4(v), %edx
20793+
20794+#ifdef CONFIG_PAX_REFCOUNT
20795+ into
20796+1234:
20797+ _ASM_EXTABLE(1234b, 2f)
20798+#endif
20799+
20800+ movl %eax, (v)
20801+ movl %edx, 4(v)
20802+
20803+#ifdef CONFIG_PAX_REFCOUNT
20804+2:
20805+#endif
20806+
20807+RET_ENDP
20808+BEGIN(sub_return_unchecked)
20809+ negl %edx
20810+ negl %eax
20811+ sbbl $0, %edx
20812+ addl (v), %eax
20813+ adcl 4(v), %edx
20814 movl %eax, (v)
20815 movl %edx, 4(v)
20816 RET_ENDP
20817@@ -105,6 +180,20 @@ RET_ENDP
20818 BEGIN(inc)
20819 addl $1, (v)
20820 adcl $0, 4(v)
20821+
20822+#ifdef CONFIG_PAX_REFCOUNT
20823+ jno 0f
20824+ subl $1, (v)
20825+ sbbl $0, 4(v)
20826+ int $4
20827+0:
20828+ _ASM_EXTABLE(0b, 0b)
20829+#endif
20830+
20831+RET_ENDP
20832+BEGIN(inc_unchecked)
20833+ addl $1, (v)
20834+ adcl $0, 4(v)
20835 RET_ENDP
20836 #undef v
20837
20838@@ -114,6 +203,26 @@ BEGIN(inc_return)
20839 movl 4(v), %edx
20840 addl $1, %eax
20841 adcl $0, %edx
20842+
20843+#ifdef CONFIG_PAX_REFCOUNT
20844+ into
20845+1234:
20846+ _ASM_EXTABLE(1234b, 2f)
20847+#endif
20848+
20849+ movl %eax, (v)
20850+ movl %edx, 4(v)
20851+
20852+#ifdef CONFIG_PAX_REFCOUNT
20853+2:
20854+#endif
20855+
20856+RET_ENDP
20857+BEGIN(inc_return_unchecked)
20858+ movl (v), %eax
20859+ movl 4(v), %edx
20860+ addl $1, %eax
20861+ adcl $0, %edx
20862 movl %eax, (v)
20863 movl %edx, 4(v)
20864 RET_ENDP
20865@@ -123,6 +232,20 @@ RET_ENDP
20866 BEGIN(dec)
20867 subl $1, (v)
20868 sbbl $0, 4(v)
20869+
20870+#ifdef CONFIG_PAX_REFCOUNT
20871+ jno 0f
20872+ addl $1, (v)
20873+ adcl $0, 4(v)
20874+ int $4
20875+0:
20876+ _ASM_EXTABLE(0b, 0b)
20877+#endif
20878+
20879+RET_ENDP
20880+BEGIN(dec_unchecked)
20881+ subl $1, (v)
20882+ sbbl $0, 4(v)
20883 RET_ENDP
20884 #undef v
20885
20886@@ -132,6 +255,26 @@ BEGIN(dec_return)
20887 movl 4(v), %edx
20888 subl $1, %eax
20889 sbbl $0, %edx
20890+
20891+#ifdef CONFIG_PAX_REFCOUNT
20892+ into
20893+1234:
20894+ _ASM_EXTABLE(1234b, 2f)
20895+#endif
20896+
20897+ movl %eax, (v)
20898+ movl %edx, 4(v)
20899+
20900+#ifdef CONFIG_PAX_REFCOUNT
20901+2:
20902+#endif
20903+
20904+RET_ENDP
20905+BEGIN(dec_return_unchecked)
20906+ movl (v), %eax
20907+ movl 4(v), %edx
20908+ subl $1, %eax
20909+ sbbl $0, %edx
20910 movl %eax, (v)
20911 movl %edx, 4(v)
20912 RET_ENDP
20913@@ -143,6 +286,13 @@ BEGIN(add_unless)
20914 adcl %edx, %edi
20915 addl (v), %eax
20916 adcl 4(v), %edx
20917+
20918+#ifdef CONFIG_PAX_REFCOUNT
20919+ into
20920+1234:
20921+ _ASM_EXTABLE(1234b, 2f)
20922+#endif
20923+
20924 cmpl %eax, %ecx
20925 je 3f
20926 1:
20927@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20928 1:
20929 addl $1, %eax
20930 adcl $0, %edx
20931+
20932+#ifdef CONFIG_PAX_REFCOUNT
20933+ into
20934+1234:
20935+ _ASM_EXTABLE(1234b, 2f)
20936+#endif
20937+
20938 movl %eax, (v)
20939 movl %edx, 4(v)
20940 movl $1, %eax
20941@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20942 movl 4(v), %edx
20943 subl $1, %eax
20944 sbbl $0, %edx
20945+
20946+#ifdef CONFIG_PAX_REFCOUNT
20947+ into
20948+1234:
20949+ _ASM_EXTABLE(1234b, 1f)
20950+#endif
20951+
20952 js 1f
20953 movl %eax, (v)
20954 movl %edx, 4(v)
20955diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20956index f5cc9eb..51fa319 100644
20957--- a/arch/x86/lib/atomic64_cx8_32.S
20958+++ b/arch/x86/lib/atomic64_cx8_32.S
20959@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20960 CFI_STARTPROC
20961
20962 read64 %ecx
20963+ pax_force_retaddr
20964 ret
20965 CFI_ENDPROC
20966 ENDPROC(atomic64_read_cx8)
20967
20968+ENTRY(atomic64_read_unchecked_cx8)
20969+ CFI_STARTPROC
20970+
20971+ read64 %ecx
20972+ pax_force_retaddr
20973+ ret
20974+ CFI_ENDPROC
20975+ENDPROC(atomic64_read_unchecked_cx8)
20976+
20977 ENTRY(atomic64_set_cx8)
20978 CFI_STARTPROC
20979
20980@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20981 cmpxchg8b (%esi)
20982 jne 1b
20983
20984+ pax_force_retaddr
20985 ret
20986 CFI_ENDPROC
20987 ENDPROC(atomic64_set_cx8)
20988
20989+ENTRY(atomic64_set_unchecked_cx8)
20990+ CFI_STARTPROC
20991+
20992+1:
20993+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20994+ * are atomic on 586 and newer */
20995+ cmpxchg8b (%esi)
20996+ jne 1b
20997+
20998+ pax_force_retaddr
20999+ ret
21000+ CFI_ENDPROC
21001+ENDPROC(atomic64_set_unchecked_cx8)
21002+
21003 ENTRY(atomic64_xchg_cx8)
21004 CFI_STARTPROC
21005
21006@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
21007 cmpxchg8b (%esi)
21008 jne 1b
21009
21010+ pax_force_retaddr
21011 ret
21012 CFI_ENDPROC
21013 ENDPROC(atomic64_xchg_cx8)
21014
21015-.macro addsub_return func ins insc
21016-ENTRY(atomic64_\func\()_return_cx8)
21017+.macro addsub_return func ins insc unchecked=""
21018+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21019 CFI_STARTPROC
21020 SAVE ebp
21021 SAVE ebx
21022@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21023 movl %edx, %ecx
21024 \ins\()l %esi, %ebx
21025 \insc\()l %edi, %ecx
21026+
21027+.ifb \unchecked
21028+#ifdef CONFIG_PAX_REFCOUNT
21029+ into
21030+2:
21031+ _ASM_EXTABLE(2b, 3f)
21032+#endif
21033+.endif
21034+
21035 LOCK_PREFIX
21036 cmpxchg8b (%ebp)
21037 jne 1b
21038-
21039-10:
21040 movl %ebx, %eax
21041 movl %ecx, %edx
21042+
21043+.ifb \unchecked
21044+#ifdef CONFIG_PAX_REFCOUNT
21045+3:
21046+#endif
21047+.endif
21048+
21049 RESTORE edi
21050 RESTORE esi
21051 RESTORE ebx
21052 RESTORE ebp
21053+ pax_force_retaddr
21054 ret
21055 CFI_ENDPROC
21056-ENDPROC(atomic64_\func\()_return_cx8)
21057+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21058 .endm
21059
21060 addsub_return add add adc
21061 addsub_return sub sub sbb
21062+addsub_return add add adc _unchecked
21063+addsub_return sub sub sbb _unchecked
21064
21065-.macro incdec_return func ins insc
21066-ENTRY(atomic64_\func\()_return_cx8)
21067+.macro incdec_return func ins insc unchecked=""
21068+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21069 CFI_STARTPROC
21070 SAVE ebx
21071
21072@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21073 movl %edx, %ecx
21074 \ins\()l $1, %ebx
21075 \insc\()l $0, %ecx
21076+
21077+.ifb \unchecked
21078+#ifdef CONFIG_PAX_REFCOUNT
21079+ into
21080+2:
21081+ _ASM_EXTABLE(2b, 3f)
21082+#endif
21083+.endif
21084+
21085 LOCK_PREFIX
21086 cmpxchg8b (%esi)
21087 jne 1b
21088
21089-10:
21090 movl %ebx, %eax
21091 movl %ecx, %edx
21092+
21093+.ifb \unchecked
21094+#ifdef CONFIG_PAX_REFCOUNT
21095+3:
21096+#endif
21097+.endif
21098+
21099 RESTORE ebx
21100+ pax_force_retaddr
21101 ret
21102 CFI_ENDPROC
21103-ENDPROC(atomic64_\func\()_return_cx8)
21104+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21105 .endm
21106
21107 incdec_return inc add adc
21108 incdec_return dec sub sbb
21109+incdec_return inc add adc _unchecked
21110+incdec_return dec sub sbb _unchecked
21111
21112 ENTRY(atomic64_dec_if_positive_cx8)
21113 CFI_STARTPROC
21114@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21115 movl %edx, %ecx
21116 subl $1, %ebx
21117 sbb $0, %ecx
21118+
21119+#ifdef CONFIG_PAX_REFCOUNT
21120+ into
21121+1234:
21122+ _ASM_EXTABLE(1234b, 2f)
21123+#endif
21124+
21125 js 2f
21126 LOCK_PREFIX
21127 cmpxchg8b (%esi)
21128@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21129 movl %ebx, %eax
21130 movl %ecx, %edx
21131 RESTORE ebx
21132+ pax_force_retaddr
21133 ret
21134 CFI_ENDPROC
21135 ENDPROC(atomic64_dec_if_positive_cx8)
21136@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21137 movl %edx, %ecx
21138 addl %ebp, %ebx
21139 adcl %edi, %ecx
21140+
21141+#ifdef CONFIG_PAX_REFCOUNT
21142+ into
21143+1234:
21144+ _ASM_EXTABLE(1234b, 3f)
21145+#endif
21146+
21147 LOCK_PREFIX
21148 cmpxchg8b (%esi)
21149 jne 1b
21150@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21151 CFI_ADJUST_CFA_OFFSET -8
21152 RESTORE ebx
21153 RESTORE ebp
21154+ pax_force_retaddr
21155 ret
21156 4:
21157 cmpl %edx, 4(%esp)
21158@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21159 xorl %ecx, %ecx
21160 addl $1, %ebx
21161 adcl %edx, %ecx
21162+
21163+#ifdef CONFIG_PAX_REFCOUNT
21164+ into
21165+1234:
21166+ _ASM_EXTABLE(1234b, 3f)
21167+#endif
21168+
21169 LOCK_PREFIX
21170 cmpxchg8b (%esi)
21171 jne 1b
21172@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21173 movl $1, %eax
21174 3:
21175 RESTORE ebx
21176+ pax_force_retaddr
21177 ret
21178 CFI_ENDPROC
21179 ENDPROC(atomic64_inc_not_zero_cx8)
21180diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21181index 78d16a5..fbcf666 100644
21182--- a/arch/x86/lib/checksum_32.S
21183+++ b/arch/x86/lib/checksum_32.S
21184@@ -28,7 +28,8 @@
21185 #include <linux/linkage.h>
21186 #include <asm/dwarf2.h>
21187 #include <asm/errno.h>
21188-
21189+#include <asm/segment.h>
21190+
21191 /*
21192 * computes a partial checksum, e.g. for TCP/UDP fragments
21193 */
21194@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21195
21196 #define ARGBASE 16
21197 #define FP 12
21198-
21199-ENTRY(csum_partial_copy_generic)
21200+
21201+ENTRY(csum_partial_copy_generic_to_user)
21202 CFI_STARTPROC
21203+
21204+#ifdef CONFIG_PAX_MEMORY_UDEREF
21205+ pushl_cfi %gs
21206+ popl_cfi %es
21207+ jmp csum_partial_copy_generic
21208+#endif
21209+
21210+ENTRY(csum_partial_copy_generic_from_user)
21211+
21212+#ifdef CONFIG_PAX_MEMORY_UDEREF
21213+ pushl_cfi %gs
21214+ popl_cfi %ds
21215+#endif
21216+
21217+ENTRY(csum_partial_copy_generic)
21218 subl $4,%esp
21219 CFI_ADJUST_CFA_OFFSET 4
21220 pushl_cfi %edi
21221@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21222 jmp 4f
21223 SRC(1: movw (%esi), %bx )
21224 addl $2, %esi
21225-DST( movw %bx, (%edi) )
21226+DST( movw %bx, %es:(%edi) )
21227 addl $2, %edi
21228 addw %bx, %ax
21229 adcl $0, %eax
21230@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21231 SRC(1: movl (%esi), %ebx )
21232 SRC( movl 4(%esi), %edx )
21233 adcl %ebx, %eax
21234-DST( movl %ebx, (%edi) )
21235+DST( movl %ebx, %es:(%edi) )
21236 adcl %edx, %eax
21237-DST( movl %edx, 4(%edi) )
21238+DST( movl %edx, %es:4(%edi) )
21239
21240 SRC( movl 8(%esi), %ebx )
21241 SRC( movl 12(%esi), %edx )
21242 adcl %ebx, %eax
21243-DST( movl %ebx, 8(%edi) )
21244+DST( movl %ebx, %es:8(%edi) )
21245 adcl %edx, %eax
21246-DST( movl %edx, 12(%edi) )
21247+DST( movl %edx, %es:12(%edi) )
21248
21249 SRC( movl 16(%esi), %ebx )
21250 SRC( movl 20(%esi), %edx )
21251 adcl %ebx, %eax
21252-DST( movl %ebx, 16(%edi) )
21253+DST( movl %ebx, %es:16(%edi) )
21254 adcl %edx, %eax
21255-DST( movl %edx, 20(%edi) )
21256+DST( movl %edx, %es:20(%edi) )
21257
21258 SRC( movl 24(%esi), %ebx )
21259 SRC( movl 28(%esi), %edx )
21260 adcl %ebx, %eax
21261-DST( movl %ebx, 24(%edi) )
21262+DST( movl %ebx, %es:24(%edi) )
21263 adcl %edx, %eax
21264-DST( movl %edx, 28(%edi) )
21265+DST( movl %edx, %es:28(%edi) )
21266
21267 lea 32(%esi), %esi
21268 lea 32(%edi), %edi
21269@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21270 shrl $2, %edx # This clears CF
21271 SRC(3: movl (%esi), %ebx )
21272 adcl %ebx, %eax
21273-DST( movl %ebx, (%edi) )
21274+DST( movl %ebx, %es:(%edi) )
21275 lea 4(%esi), %esi
21276 lea 4(%edi), %edi
21277 dec %edx
21278@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21279 jb 5f
21280 SRC( movw (%esi), %cx )
21281 leal 2(%esi), %esi
21282-DST( movw %cx, (%edi) )
21283+DST( movw %cx, %es:(%edi) )
21284 leal 2(%edi), %edi
21285 je 6f
21286 shll $16,%ecx
21287 SRC(5: movb (%esi), %cl )
21288-DST( movb %cl, (%edi) )
21289+DST( movb %cl, %es:(%edi) )
21290 6: addl %ecx, %eax
21291 adcl $0, %eax
21292 7:
21293@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21294
21295 6001:
21296 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21297- movl $-EFAULT, (%ebx)
21298+ movl $-EFAULT, %ss:(%ebx)
21299
21300 # zero the complete destination - computing the rest
21301 # is too much work
21302@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21303
21304 6002:
21305 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21306- movl $-EFAULT,(%ebx)
21307+ movl $-EFAULT,%ss:(%ebx)
21308 jmp 5000b
21309
21310 .previous
21311
21312+ pushl_cfi %ss
21313+ popl_cfi %ds
21314+ pushl_cfi %ss
21315+ popl_cfi %es
21316 popl_cfi %ebx
21317 CFI_RESTORE ebx
21318 popl_cfi %esi
21319@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21320 popl_cfi %ecx # equivalent to addl $4,%esp
21321 ret
21322 CFI_ENDPROC
21323-ENDPROC(csum_partial_copy_generic)
21324+ENDPROC(csum_partial_copy_generic_to_user)
21325
21326 #else
21327
21328 /* Version for PentiumII/PPro */
21329
21330 #define ROUND1(x) \
21331+ nop; nop; nop; \
21332 SRC(movl x(%esi), %ebx ) ; \
21333 addl %ebx, %eax ; \
21334- DST(movl %ebx, x(%edi) ) ;
21335+ DST(movl %ebx, %es:x(%edi)) ;
21336
21337 #define ROUND(x) \
21338+ nop; nop; nop; \
21339 SRC(movl x(%esi), %ebx ) ; \
21340 adcl %ebx, %eax ; \
21341- DST(movl %ebx, x(%edi) ) ;
21342+ DST(movl %ebx, %es:x(%edi)) ;
21343
21344 #define ARGBASE 12
21345-
21346-ENTRY(csum_partial_copy_generic)
21347+
21348+ENTRY(csum_partial_copy_generic_to_user)
21349 CFI_STARTPROC
21350+
21351+#ifdef CONFIG_PAX_MEMORY_UDEREF
21352+ pushl_cfi %gs
21353+ popl_cfi %es
21354+ jmp csum_partial_copy_generic
21355+#endif
21356+
21357+ENTRY(csum_partial_copy_generic_from_user)
21358+
21359+#ifdef CONFIG_PAX_MEMORY_UDEREF
21360+ pushl_cfi %gs
21361+ popl_cfi %ds
21362+#endif
21363+
21364+ENTRY(csum_partial_copy_generic)
21365 pushl_cfi %ebx
21366 CFI_REL_OFFSET ebx, 0
21367 pushl_cfi %edi
21368@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21369 subl %ebx, %edi
21370 lea -1(%esi),%edx
21371 andl $-32,%edx
21372- lea 3f(%ebx,%ebx), %ebx
21373+ lea 3f(%ebx,%ebx,2), %ebx
21374 testl %esi, %esi
21375 jmp *%ebx
21376 1: addl $64,%esi
21377@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21378 jb 5f
21379 SRC( movw (%esi), %dx )
21380 leal 2(%esi), %esi
21381-DST( movw %dx, (%edi) )
21382+DST( movw %dx, %es:(%edi) )
21383 leal 2(%edi), %edi
21384 je 6f
21385 shll $16,%edx
21386 5:
21387 SRC( movb (%esi), %dl )
21388-DST( movb %dl, (%edi) )
21389+DST( movb %dl, %es:(%edi) )
21390 6: addl %edx, %eax
21391 adcl $0, %eax
21392 7:
21393 .section .fixup, "ax"
21394 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21395- movl $-EFAULT, (%ebx)
21396+ movl $-EFAULT, %ss:(%ebx)
21397 # zero the complete destination (computing the rest is too much work)
21398 movl ARGBASE+8(%esp),%edi # dst
21399 movl ARGBASE+12(%esp),%ecx # len
21400@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21401 rep; stosb
21402 jmp 7b
21403 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21404- movl $-EFAULT, (%ebx)
21405+ movl $-EFAULT, %ss:(%ebx)
21406 jmp 7b
21407 .previous
21408
21409+#ifdef CONFIG_PAX_MEMORY_UDEREF
21410+ pushl_cfi %ss
21411+ popl_cfi %ds
21412+ pushl_cfi %ss
21413+ popl_cfi %es
21414+#endif
21415+
21416 popl_cfi %esi
21417 CFI_RESTORE esi
21418 popl_cfi %edi
21419@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21420 CFI_RESTORE ebx
21421 ret
21422 CFI_ENDPROC
21423-ENDPROC(csum_partial_copy_generic)
21424+ENDPROC(csum_partial_copy_generic_to_user)
21425
21426 #undef ROUND
21427 #undef ROUND1
21428diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21429index f2145cf..cea889d 100644
21430--- a/arch/x86/lib/clear_page_64.S
21431+++ b/arch/x86/lib/clear_page_64.S
21432@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21433 movl $4096/8,%ecx
21434 xorl %eax,%eax
21435 rep stosq
21436+ pax_force_retaddr
21437 ret
21438 CFI_ENDPROC
21439 ENDPROC(clear_page_c)
21440@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21441 movl $4096,%ecx
21442 xorl %eax,%eax
21443 rep stosb
21444+ pax_force_retaddr
21445 ret
21446 CFI_ENDPROC
21447 ENDPROC(clear_page_c_e)
21448@@ -43,6 +45,7 @@ ENTRY(clear_page)
21449 leaq 64(%rdi),%rdi
21450 jnz .Lloop
21451 nop
21452+ pax_force_retaddr
21453 ret
21454 CFI_ENDPROC
21455 .Lclear_page_end:
21456@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21457
21458 #include <asm/cpufeature.h>
21459
21460- .section .altinstr_replacement,"ax"
21461+ .section .altinstr_replacement,"a"
21462 1: .byte 0xeb /* jmp <disp8> */
21463 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21464 2: .byte 0xeb /* jmp <disp8> */
21465diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21466index 1e572c5..2a162cd 100644
21467--- a/arch/x86/lib/cmpxchg16b_emu.S
21468+++ b/arch/x86/lib/cmpxchg16b_emu.S
21469@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21470
21471 popf
21472 mov $1, %al
21473+ pax_force_retaddr
21474 ret
21475
21476 not_same:
21477 popf
21478 xor %al,%al
21479+ pax_force_retaddr
21480 ret
21481
21482 CFI_ENDPROC
21483diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21484index 6b34d04..dccb07f 100644
21485--- a/arch/x86/lib/copy_page_64.S
21486+++ b/arch/x86/lib/copy_page_64.S
21487@@ -9,6 +9,7 @@ copy_page_c:
21488 CFI_STARTPROC
21489 movl $4096/8,%ecx
21490 rep movsq
21491+ pax_force_retaddr
21492 ret
21493 CFI_ENDPROC
21494 ENDPROC(copy_page_c)
21495@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21496
21497 ENTRY(copy_page)
21498 CFI_STARTPROC
21499- subq $2*8,%rsp
21500- CFI_ADJUST_CFA_OFFSET 2*8
21501+ subq $3*8,%rsp
21502+ CFI_ADJUST_CFA_OFFSET 3*8
21503 movq %rbx,(%rsp)
21504 CFI_REL_OFFSET rbx, 0
21505 movq %r12,1*8(%rsp)
21506 CFI_REL_OFFSET r12, 1*8
21507+ movq %r13,2*8(%rsp)
21508+ CFI_REL_OFFSET r13, 2*8
21509
21510 movl $(4096/64)-5,%ecx
21511 .p2align 4
21512@@ -37,7 +40,7 @@ ENTRY(copy_page)
21513 movq 16 (%rsi), %rdx
21514 movq 24 (%rsi), %r8
21515 movq 32 (%rsi), %r9
21516- movq 40 (%rsi), %r10
21517+ movq 40 (%rsi), %r13
21518 movq 48 (%rsi), %r11
21519 movq 56 (%rsi), %r12
21520
21521@@ -48,7 +51,7 @@ ENTRY(copy_page)
21522 movq %rdx, 16 (%rdi)
21523 movq %r8, 24 (%rdi)
21524 movq %r9, 32 (%rdi)
21525- movq %r10, 40 (%rdi)
21526+ movq %r13, 40 (%rdi)
21527 movq %r11, 48 (%rdi)
21528 movq %r12, 56 (%rdi)
21529
21530@@ -67,7 +70,7 @@ ENTRY(copy_page)
21531 movq 16 (%rsi), %rdx
21532 movq 24 (%rsi), %r8
21533 movq 32 (%rsi), %r9
21534- movq 40 (%rsi), %r10
21535+ movq 40 (%rsi), %r13
21536 movq 48 (%rsi), %r11
21537 movq 56 (%rsi), %r12
21538
21539@@ -76,7 +79,7 @@ ENTRY(copy_page)
21540 movq %rdx, 16 (%rdi)
21541 movq %r8, 24 (%rdi)
21542 movq %r9, 32 (%rdi)
21543- movq %r10, 40 (%rdi)
21544+ movq %r13, 40 (%rdi)
21545 movq %r11, 48 (%rdi)
21546 movq %r12, 56 (%rdi)
21547
21548@@ -89,8 +92,11 @@ ENTRY(copy_page)
21549 CFI_RESTORE rbx
21550 movq 1*8(%rsp),%r12
21551 CFI_RESTORE r12
21552- addq $2*8,%rsp
21553- CFI_ADJUST_CFA_OFFSET -2*8
21554+ movq 2*8(%rsp),%r13
21555+ CFI_RESTORE r13
21556+ addq $3*8,%rsp
21557+ CFI_ADJUST_CFA_OFFSET -3*8
21558+ pax_force_retaddr
21559 ret
21560 .Lcopy_page_end:
21561 CFI_ENDPROC
21562@@ -101,7 +107,7 @@ ENDPROC(copy_page)
21563
21564 #include <asm/cpufeature.h>
21565
21566- .section .altinstr_replacement,"ax"
21567+ .section .altinstr_replacement,"a"
21568 1: .byte 0xeb /* jmp <disp8> */
21569 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21570 2:
21571diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21572index 0248402..821c786 100644
21573--- a/arch/x86/lib/copy_user_64.S
21574+++ b/arch/x86/lib/copy_user_64.S
21575@@ -16,6 +16,7 @@
21576 #include <asm/thread_info.h>
21577 #include <asm/cpufeature.h>
21578 #include <asm/alternative-asm.h>
21579+#include <asm/pgtable.h>
21580
21581 /*
21582 * By placing feature2 after feature1 in altinstructions section, we logically
21583@@ -29,7 +30,7 @@
21584 .byte 0xe9 /* 32bit jump */
21585 .long \orig-1f /* by default jump to orig */
21586 1:
21587- .section .altinstr_replacement,"ax"
21588+ .section .altinstr_replacement,"a"
21589 2: .byte 0xe9 /* near jump with 32bit immediate */
21590 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21591 3: .byte 0xe9 /* near jump with 32bit immediate */
21592@@ -71,47 +72,20 @@
21593 #endif
21594 .endm
21595
21596-/* Standard copy_to_user with segment limit checking */
21597-ENTRY(_copy_to_user)
21598- CFI_STARTPROC
21599- GET_THREAD_INFO(%rax)
21600- movq %rdi,%rcx
21601- addq %rdx,%rcx
21602- jc bad_to_user
21603- cmpq TI_addr_limit(%rax),%rcx
21604- ja bad_to_user
21605- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21606- copy_user_generic_unrolled,copy_user_generic_string, \
21607- copy_user_enhanced_fast_string
21608- CFI_ENDPROC
21609-ENDPROC(_copy_to_user)
21610-
21611-/* Standard copy_from_user with segment limit checking */
21612-ENTRY(_copy_from_user)
21613- CFI_STARTPROC
21614- GET_THREAD_INFO(%rax)
21615- movq %rsi,%rcx
21616- addq %rdx,%rcx
21617- jc bad_from_user
21618- cmpq TI_addr_limit(%rax),%rcx
21619- ja bad_from_user
21620- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21621- copy_user_generic_unrolled,copy_user_generic_string, \
21622- copy_user_enhanced_fast_string
21623- CFI_ENDPROC
21624-ENDPROC(_copy_from_user)
21625-
21626 .section .fixup,"ax"
21627 /* must zero dest */
21628 ENTRY(bad_from_user)
21629 bad_from_user:
21630 CFI_STARTPROC
21631+ testl %edx,%edx
21632+ js bad_to_user
21633 movl %edx,%ecx
21634 xorl %eax,%eax
21635 rep
21636 stosb
21637 bad_to_user:
21638 movl %edx,%eax
21639+ pax_force_retaddr
21640 ret
21641 CFI_ENDPROC
21642 ENDPROC(bad_from_user)
21643@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21644 jz 17f
21645 1: movq (%rsi),%r8
21646 2: movq 1*8(%rsi),%r9
21647-3: movq 2*8(%rsi),%r10
21648+3: movq 2*8(%rsi),%rax
21649 4: movq 3*8(%rsi),%r11
21650 5: movq %r8,(%rdi)
21651 6: movq %r9,1*8(%rdi)
21652-7: movq %r10,2*8(%rdi)
21653+7: movq %rax,2*8(%rdi)
21654 8: movq %r11,3*8(%rdi)
21655 9: movq 4*8(%rsi),%r8
21656 10: movq 5*8(%rsi),%r9
21657-11: movq 6*8(%rsi),%r10
21658+11: movq 6*8(%rsi),%rax
21659 12: movq 7*8(%rsi),%r11
21660 13: movq %r8,4*8(%rdi)
21661 14: movq %r9,5*8(%rdi)
21662-15: movq %r10,6*8(%rdi)
21663+15: movq %rax,6*8(%rdi)
21664 16: movq %r11,7*8(%rdi)
21665 leaq 64(%rsi),%rsi
21666 leaq 64(%rdi),%rdi
21667@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21668 decl %ecx
21669 jnz 21b
21670 23: xor %eax,%eax
21671+ pax_force_retaddr
21672 ret
21673
21674 .section .fixup,"ax"
21675@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21676 3: rep
21677 movsb
21678 4: xorl %eax,%eax
21679+ pax_force_retaddr
21680 ret
21681
21682 .section .fixup,"ax"
21683@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21684 1: rep
21685 movsb
21686 2: xorl %eax,%eax
21687+ pax_force_retaddr
21688 ret
21689
21690 .section .fixup,"ax"
21691diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21692index cb0c112..e3a6895 100644
21693--- a/arch/x86/lib/copy_user_nocache_64.S
21694+++ b/arch/x86/lib/copy_user_nocache_64.S
21695@@ -8,12 +8,14 @@
21696
21697 #include <linux/linkage.h>
21698 #include <asm/dwarf2.h>
21699+#include <asm/alternative-asm.h>
21700
21701 #define FIX_ALIGNMENT 1
21702
21703 #include <asm/current.h>
21704 #include <asm/asm-offsets.h>
21705 #include <asm/thread_info.h>
21706+#include <asm/pgtable.h>
21707
21708 .macro ALIGN_DESTINATION
21709 #ifdef FIX_ALIGNMENT
21710@@ -50,6 +52,15 @@
21711 */
21712 ENTRY(__copy_user_nocache)
21713 CFI_STARTPROC
21714+
21715+#ifdef CONFIG_PAX_MEMORY_UDEREF
21716+ mov $PAX_USER_SHADOW_BASE,%rcx
21717+ cmp %rcx,%rsi
21718+ jae 1f
21719+ add %rcx,%rsi
21720+1:
21721+#endif
21722+
21723 cmpl $8,%edx
21724 jb 20f /* less then 8 bytes, go to byte copy loop */
21725 ALIGN_DESTINATION
21726@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21727 jz 17f
21728 1: movq (%rsi),%r8
21729 2: movq 1*8(%rsi),%r9
21730-3: movq 2*8(%rsi),%r10
21731+3: movq 2*8(%rsi),%rax
21732 4: movq 3*8(%rsi),%r11
21733 5: movnti %r8,(%rdi)
21734 6: movnti %r9,1*8(%rdi)
21735-7: movnti %r10,2*8(%rdi)
21736+7: movnti %rax,2*8(%rdi)
21737 8: movnti %r11,3*8(%rdi)
21738 9: movq 4*8(%rsi),%r8
21739 10: movq 5*8(%rsi),%r9
21740-11: movq 6*8(%rsi),%r10
21741+11: movq 6*8(%rsi),%rax
21742 12: movq 7*8(%rsi),%r11
21743 13: movnti %r8,4*8(%rdi)
21744 14: movnti %r9,5*8(%rdi)
21745-15: movnti %r10,6*8(%rdi)
21746+15: movnti %rax,6*8(%rdi)
21747 16: movnti %r11,7*8(%rdi)
21748 leaq 64(%rsi),%rsi
21749 leaq 64(%rdi),%rdi
21750@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21751 jnz 21b
21752 23: xorl %eax,%eax
21753 sfence
21754+ pax_force_retaddr
21755 ret
21756
21757 .section .fixup,"ax"
21758diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21759index fb903b7..c92b7f7 100644
21760--- a/arch/x86/lib/csum-copy_64.S
21761+++ b/arch/x86/lib/csum-copy_64.S
21762@@ -8,6 +8,7 @@
21763 #include <linux/linkage.h>
21764 #include <asm/dwarf2.h>
21765 #include <asm/errno.h>
21766+#include <asm/alternative-asm.h>
21767
21768 /*
21769 * Checksum copy with exception handling.
21770@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21771 CFI_RESTORE rbp
21772 addq $7*8, %rsp
21773 CFI_ADJUST_CFA_OFFSET -7*8
21774+ pax_force_retaddr 0, 1
21775 ret
21776 CFI_RESTORE_STATE
21777
21778diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21779index 459b58a..9570bc7 100644
21780--- a/arch/x86/lib/csum-wrappers_64.c
21781+++ b/arch/x86/lib/csum-wrappers_64.c
21782@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21783 len -= 2;
21784 }
21785 }
21786- isum = csum_partial_copy_generic((__force const void *)src,
21787+
21788+#ifdef CONFIG_PAX_MEMORY_UDEREF
21789+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21790+ src += PAX_USER_SHADOW_BASE;
21791+#endif
21792+
21793+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21794 dst, len, isum, errp, NULL);
21795 if (unlikely(*errp))
21796 goto out_err;
21797@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21798 }
21799
21800 *errp = 0;
21801- return csum_partial_copy_generic(src, (void __force *)dst,
21802+
21803+#ifdef CONFIG_PAX_MEMORY_UDEREF
21804+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21805+ dst += PAX_USER_SHADOW_BASE;
21806+#endif
21807+
21808+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21809 len, isum, NULL, errp);
21810 }
21811 EXPORT_SYMBOL(csum_partial_copy_to_user);
21812diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21813index 51f1504..ddac4c1 100644
21814--- a/arch/x86/lib/getuser.S
21815+++ b/arch/x86/lib/getuser.S
21816@@ -33,15 +33,38 @@
21817 #include <asm/asm-offsets.h>
21818 #include <asm/thread_info.h>
21819 #include <asm/asm.h>
21820+#include <asm/segment.h>
21821+#include <asm/pgtable.h>
21822+#include <asm/alternative-asm.h>
21823+
21824+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21825+#define __copyuser_seg gs;
21826+#else
21827+#define __copyuser_seg
21828+#endif
21829
21830 .text
21831 ENTRY(__get_user_1)
21832 CFI_STARTPROC
21833+
21834+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21835 GET_THREAD_INFO(%_ASM_DX)
21836 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21837 jae bad_get_user
21838-1: movzb (%_ASM_AX),%edx
21839+
21840+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21841+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21842+ cmp %_ASM_DX,%_ASM_AX
21843+ jae 1234f
21844+ add %_ASM_DX,%_ASM_AX
21845+1234:
21846+#endif
21847+
21848+#endif
21849+
21850+1: __copyuser_seg movzb (%_ASM_AX),%edx
21851 xor %eax,%eax
21852+ pax_force_retaddr
21853 ret
21854 CFI_ENDPROC
21855 ENDPROC(__get_user_1)
21856@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21857 ENTRY(__get_user_2)
21858 CFI_STARTPROC
21859 add $1,%_ASM_AX
21860+
21861+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21862 jc bad_get_user
21863 GET_THREAD_INFO(%_ASM_DX)
21864 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21865 jae bad_get_user
21866-2: movzwl -1(%_ASM_AX),%edx
21867+
21868+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21869+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21870+ cmp %_ASM_DX,%_ASM_AX
21871+ jae 1234f
21872+ add %_ASM_DX,%_ASM_AX
21873+1234:
21874+#endif
21875+
21876+#endif
21877+
21878+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21879 xor %eax,%eax
21880+ pax_force_retaddr
21881 ret
21882 CFI_ENDPROC
21883 ENDPROC(__get_user_2)
21884@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21885 ENTRY(__get_user_4)
21886 CFI_STARTPROC
21887 add $3,%_ASM_AX
21888+
21889+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21890 jc bad_get_user
21891 GET_THREAD_INFO(%_ASM_DX)
21892 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21893 jae bad_get_user
21894-3: mov -3(%_ASM_AX),%edx
21895+
21896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21897+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21898+ cmp %_ASM_DX,%_ASM_AX
21899+ jae 1234f
21900+ add %_ASM_DX,%_ASM_AX
21901+1234:
21902+#endif
21903+
21904+#endif
21905+
21906+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21907 xor %eax,%eax
21908+ pax_force_retaddr
21909 ret
21910 CFI_ENDPROC
21911 ENDPROC(__get_user_4)
21912@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21913 GET_THREAD_INFO(%_ASM_DX)
21914 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21915 jae bad_get_user
21916+
21917+#ifdef CONFIG_PAX_MEMORY_UDEREF
21918+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21919+ cmp %_ASM_DX,%_ASM_AX
21920+ jae 1234f
21921+ add %_ASM_DX,%_ASM_AX
21922+1234:
21923+#endif
21924+
21925 4: movq -7(%_ASM_AX),%_ASM_DX
21926 xor %eax,%eax
21927+ pax_force_retaddr
21928 ret
21929 CFI_ENDPROC
21930 ENDPROC(__get_user_8)
21931@@ -91,6 +152,7 @@ bad_get_user:
21932 CFI_STARTPROC
21933 xor %edx,%edx
21934 mov $(-EFAULT),%_ASM_AX
21935+ pax_force_retaddr
21936 ret
21937 CFI_ENDPROC
21938 END(bad_get_user)
21939diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21940index b1e6c4b..21ae8fc 100644
21941--- a/arch/x86/lib/insn.c
21942+++ b/arch/x86/lib/insn.c
21943@@ -21,6 +21,11 @@
21944 #include <linux/string.h>
21945 #include <asm/inat.h>
21946 #include <asm/insn.h>
21947+#ifdef __KERNEL__
21948+#include <asm/pgtable_types.h>
21949+#else
21950+#define ktla_ktva(addr) addr
21951+#endif
21952
21953 /* Verify next sizeof(t) bytes can be on the same instruction */
21954 #define validate_next(t, insn, n) \
21955@@ -49,8 +54,8 @@
21956 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21957 {
21958 memset(insn, 0, sizeof(*insn));
21959- insn->kaddr = kaddr;
21960- insn->next_byte = kaddr;
21961+ insn->kaddr = ktla_ktva(kaddr);
21962+ insn->next_byte = ktla_ktva(kaddr);
21963 insn->x86_64 = x86_64 ? 1 : 0;
21964 insn->opnd_bytes = 4;
21965 if (x86_64)
21966diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21967index 05a95e7..326f2fa 100644
21968--- a/arch/x86/lib/iomap_copy_64.S
21969+++ b/arch/x86/lib/iomap_copy_64.S
21970@@ -17,6 +17,7 @@
21971
21972 #include <linux/linkage.h>
21973 #include <asm/dwarf2.h>
21974+#include <asm/alternative-asm.h>
21975
21976 /*
21977 * override generic version in lib/iomap_copy.c
21978@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21979 CFI_STARTPROC
21980 movl %edx,%ecx
21981 rep movsd
21982+ pax_force_retaddr
21983 ret
21984 CFI_ENDPROC
21985 ENDPROC(__iowrite32_copy)
21986diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21987index 1c273be..da9cc0e 100644
21988--- a/arch/x86/lib/memcpy_64.S
21989+++ b/arch/x86/lib/memcpy_64.S
21990@@ -33,6 +33,7 @@
21991 rep movsq
21992 movl %edx, %ecx
21993 rep movsb
21994+ pax_force_retaddr
21995 ret
21996 .Lmemcpy_e:
21997 .previous
21998@@ -49,6 +50,7 @@
21999 movq %rdi, %rax
22000 movq %rdx, %rcx
22001 rep movsb
22002+ pax_force_retaddr
22003 ret
22004 .Lmemcpy_e_e:
22005 .previous
22006@@ -76,13 +78,13 @@ ENTRY(memcpy)
22007 */
22008 movq 0*8(%rsi), %r8
22009 movq 1*8(%rsi), %r9
22010- movq 2*8(%rsi), %r10
22011+ movq 2*8(%rsi), %rcx
22012 movq 3*8(%rsi), %r11
22013 leaq 4*8(%rsi), %rsi
22014
22015 movq %r8, 0*8(%rdi)
22016 movq %r9, 1*8(%rdi)
22017- movq %r10, 2*8(%rdi)
22018+ movq %rcx, 2*8(%rdi)
22019 movq %r11, 3*8(%rdi)
22020 leaq 4*8(%rdi), %rdi
22021 jae .Lcopy_forward_loop
22022@@ -105,12 +107,12 @@ ENTRY(memcpy)
22023 subq $0x20, %rdx
22024 movq -1*8(%rsi), %r8
22025 movq -2*8(%rsi), %r9
22026- movq -3*8(%rsi), %r10
22027+ movq -3*8(%rsi), %rcx
22028 movq -4*8(%rsi), %r11
22029 leaq -4*8(%rsi), %rsi
22030 movq %r8, -1*8(%rdi)
22031 movq %r9, -2*8(%rdi)
22032- movq %r10, -3*8(%rdi)
22033+ movq %rcx, -3*8(%rdi)
22034 movq %r11, -4*8(%rdi)
22035 leaq -4*8(%rdi), %rdi
22036 jae .Lcopy_backward_loop
22037@@ -130,12 +132,13 @@ ENTRY(memcpy)
22038 */
22039 movq 0*8(%rsi), %r8
22040 movq 1*8(%rsi), %r9
22041- movq -2*8(%rsi, %rdx), %r10
22042+ movq -2*8(%rsi, %rdx), %rcx
22043 movq -1*8(%rsi, %rdx), %r11
22044 movq %r8, 0*8(%rdi)
22045 movq %r9, 1*8(%rdi)
22046- movq %r10, -2*8(%rdi, %rdx)
22047+ movq %rcx, -2*8(%rdi, %rdx)
22048 movq %r11, -1*8(%rdi, %rdx)
22049+ pax_force_retaddr
22050 retq
22051 .p2align 4
22052 .Lless_16bytes:
22053@@ -148,6 +151,7 @@ ENTRY(memcpy)
22054 movq -1*8(%rsi, %rdx), %r9
22055 movq %r8, 0*8(%rdi)
22056 movq %r9, -1*8(%rdi, %rdx)
22057+ pax_force_retaddr
22058 retq
22059 .p2align 4
22060 .Lless_8bytes:
22061@@ -161,6 +165,7 @@ ENTRY(memcpy)
22062 movl -4(%rsi, %rdx), %r8d
22063 movl %ecx, (%rdi)
22064 movl %r8d, -4(%rdi, %rdx)
22065+ pax_force_retaddr
22066 retq
22067 .p2align 4
22068 .Lless_3bytes:
22069@@ -179,6 +184,7 @@ ENTRY(memcpy)
22070 movb %cl, (%rdi)
22071
22072 .Lend:
22073+ pax_force_retaddr
22074 retq
22075 CFI_ENDPROC
22076 ENDPROC(memcpy)
22077diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22078index ee16461..c39c199 100644
22079--- a/arch/x86/lib/memmove_64.S
22080+++ b/arch/x86/lib/memmove_64.S
22081@@ -61,13 +61,13 @@ ENTRY(memmove)
22082 5:
22083 sub $0x20, %rdx
22084 movq 0*8(%rsi), %r11
22085- movq 1*8(%rsi), %r10
22086+ movq 1*8(%rsi), %rcx
22087 movq 2*8(%rsi), %r9
22088 movq 3*8(%rsi), %r8
22089 leaq 4*8(%rsi), %rsi
22090
22091 movq %r11, 0*8(%rdi)
22092- movq %r10, 1*8(%rdi)
22093+ movq %rcx, 1*8(%rdi)
22094 movq %r9, 2*8(%rdi)
22095 movq %r8, 3*8(%rdi)
22096 leaq 4*8(%rdi), %rdi
22097@@ -81,10 +81,10 @@ ENTRY(memmove)
22098 4:
22099 movq %rdx, %rcx
22100 movq -8(%rsi, %rdx), %r11
22101- lea -8(%rdi, %rdx), %r10
22102+ lea -8(%rdi, %rdx), %r9
22103 shrq $3, %rcx
22104 rep movsq
22105- movq %r11, (%r10)
22106+ movq %r11, (%r9)
22107 jmp 13f
22108 .Lmemmove_end_forward:
22109
22110@@ -95,14 +95,14 @@ ENTRY(memmove)
22111 7:
22112 movq %rdx, %rcx
22113 movq (%rsi), %r11
22114- movq %rdi, %r10
22115+ movq %rdi, %r9
22116 leaq -8(%rsi, %rdx), %rsi
22117 leaq -8(%rdi, %rdx), %rdi
22118 shrq $3, %rcx
22119 std
22120 rep movsq
22121 cld
22122- movq %r11, (%r10)
22123+ movq %r11, (%r9)
22124 jmp 13f
22125
22126 /*
22127@@ -127,13 +127,13 @@ ENTRY(memmove)
22128 8:
22129 subq $0x20, %rdx
22130 movq -1*8(%rsi), %r11
22131- movq -2*8(%rsi), %r10
22132+ movq -2*8(%rsi), %rcx
22133 movq -3*8(%rsi), %r9
22134 movq -4*8(%rsi), %r8
22135 leaq -4*8(%rsi), %rsi
22136
22137 movq %r11, -1*8(%rdi)
22138- movq %r10, -2*8(%rdi)
22139+ movq %rcx, -2*8(%rdi)
22140 movq %r9, -3*8(%rdi)
22141 movq %r8, -4*8(%rdi)
22142 leaq -4*8(%rdi), %rdi
22143@@ -151,11 +151,11 @@ ENTRY(memmove)
22144 * Move data from 16 bytes to 31 bytes.
22145 */
22146 movq 0*8(%rsi), %r11
22147- movq 1*8(%rsi), %r10
22148+ movq 1*8(%rsi), %rcx
22149 movq -2*8(%rsi, %rdx), %r9
22150 movq -1*8(%rsi, %rdx), %r8
22151 movq %r11, 0*8(%rdi)
22152- movq %r10, 1*8(%rdi)
22153+ movq %rcx, 1*8(%rdi)
22154 movq %r9, -2*8(%rdi, %rdx)
22155 movq %r8, -1*8(%rdi, %rdx)
22156 jmp 13f
22157@@ -167,9 +167,9 @@ ENTRY(memmove)
22158 * Move data from 8 bytes to 15 bytes.
22159 */
22160 movq 0*8(%rsi), %r11
22161- movq -1*8(%rsi, %rdx), %r10
22162+ movq -1*8(%rsi, %rdx), %r9
22163 movq %r11, 0*8(%rdi)
22164- movq %r10, -1*8(%rdi, %rdx)
22165+ movq %r9, -1*8(%rdi, %rdx)
22166 jmp 13f
22167 10:
22168 cmpq $4, %rdx
22169@@ -178,9 +178,9 @@ ENTRY(memmove)
22170 * Move data from 4 bytes to 7 bytes.
22171 */
22172 movl (%rsi), %r11d
22173- movl -4(%rsi, %rdx), %r10d
22174+ movl -4(%rsi, %rdx), %r9d
22175 movl %r11d, (%rdi)
22176- movl %r10d, -4(%rdi, %rdx)
22177+ movl %r9d, -4(%rdi, %rdx)
22178 jmp 13f
22179 11:
22180 cmp $2, %rdx
22181@@ -189,9 +189,9 @@ ENTRY(memmove)
22182 * Move data from 2 bytes to 3 bytes.
22183 */
22184 movw (%rsi), %r11w
22185- movw -2(%rsi, %rdx), %r10w
22186+ movw -2(%rsi, %rdx), %r9w
22187 movw %r11w, (%rdi)
22188- movw %r10w, -2(%rdi, %rdx)
22189+ movw %r9w, -2(%rdi, %rdx)
22190 jmp 13f
22191 12:
22192 cmp $1, %rdx
22193@@ -202,6 +202,7 @@ ENTRY(memmove)
22194 movb (%rsi), %r11b
22195 movb %r11b, (%rdi)
22196 13:
22197+ pax_force_retaddr
22198 retq
22199 CFI_ENDPROC
22200
22201@@ -210,6 +211,7 @@ ENTRY(memmove)
22202 /* Forward moving data. */
22203 movq %rdx, %rcx
22204 rep movsb
22205+ pax_force_retaddr
22206 retq
22207 .Lmemmove_end_forward_efs:
22208 .previous
22209diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22210index 2dcb380..963660a 100644
22211--- a/arch/x86/lib/memset_64.S
22212+++ b/arch/x86/lib/memset_64.S
22213@@ -30,6 +30,7 @@
22214 movl %edx,%ecx
22215 rep stosb
22216 movq %r9,%rax
22217+ pax_force_retaddr
22218 ret
22219 .Lmemset_e:
22220 .previous
22221@@ -52,6 +53,7 @@
22222 movq %rdx,%rcx
22223 rep stosb
22224 movq %r9,%rax
22225+ pax_force_retaddr
22226 ret
22227 .Lmemset_e_e:
22228 .previous
22229@@ -59,7 +61,7 @@
22230 ENTRY(memset)
22231 ENTRY(__memset)
22232 CFI_STARTPROC
22233- movq %rdi,%r10
22234+ movq %rdi,%r11
22235
22236 /* expand byte value */
22237 movzbl %sil,%ecx
22238@@ -117,7 +119,8 @@ ENTRY(__memset)
22239 jnz .Lloop_1
22240
22241 .Lende:
22242- movq %r10,%rax
22243+ movq %r11,%rax
22244+ pax_force_retaddr
22245 ret
22246
22247 CFI_RESTORE_STATE
22248diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22249index c9f2d9b..e7fd2c0 100644
22250--- a/arch/x86/lib/mmx_32.c
22251+++ b/arch/x86/lib/mmx_32.c
22252@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22253 {
22254 void *p;
22255 int i;
22256+ unsigned long cr0;
22257
22258 if (unlikely(in_interrupt()))
22259 return __memcpy(to, from, len);
22260@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22261 kernel_fpu_begin();
22262
22263 __asm__ __volatile__ (
22264- "1: prefetch (%0)\n" /* This set is 28 bytes */
22265- " prefetch 64(%0)\n"
22266- " prefetch 128(%0)\n"
22267- " prefetch 192(%0)\n"
22268- " prefetch 256(%0)\n"
22269+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22270+ " prefetch 64(%1)\n"
22271+ " prefetch 128(%1)\n"
22272+ " prefetch 192(%1)\n"
22273+ " prefetch 256(%1)\n"
22274 "2: \n"
22275 ".section .fixup, \"ax\"\n"
22276- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22277+ "3: \n"
22278+
22279+#ifdef CONFIG_PAX_KERNEXEC
22280+ " movl %%cr0, %0\n"
22281+ " movl %0, %%eax\n"
22282+ " andl $0xFFFEFFFF, %%eax\n"
22283+ " movl %%eax, %%cr0\n"
22284+#endif
22285+
22286+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22287+
22288+#ifdef CONFIG_PAX_KERNEXEC
22289+ " movl %0, %%cr0\n"
22290+#endif
22291+
22292 " jmp 2b\n"
22293 ".previous\n"
22294 _ASM_EXTABLE(1b, 3b)
22295- : : "r" (from));
22296+ : "=&r" (cr0) : "r" (from) : "ax");
22297
22298 for ( ; i > 5; i--) {
22299 __asm__ __volatile__ (
22300- "1: prefetch 320(%0)\n"
22301- "2: movq (%0), %%mm0\n"
22302- " movq 8(%0), %%mm1\n"
22303- " movq 16(%0), %%mm2\n"
22304- " movq 24(%0), %%mm3\n"
22305- " movq %%mm0, (%1)\n"
22306- " movq %%mm1, 8(%1)\n"
22307- " movq %%mm2, 16(%1)\n"
22308- " movq %%mm3, 24(%1)\n"
22309- " movq 32(%0), %%mm0\n"
22310- " movq 40(%0), %%mm1\n"
22311- " movq 48(%0), %%mm2\n"
22312- " movq 56(%0), %%mm3\n"
22313- " movq %%mm0, 32(%1)\n"
22314- " movq %%mm1, 40(%1)\n"
22315- " movq %%mm2, 48(%1)\n"
22316- " movq %%mm3, 56(%1)\n"
22317+ "1: prefetch 320(%1)\n"
22318+ "2: movq (%1), %%mm0\n"
22319+ " movq 8(%1), %%mm1\n"
22320+ " movq 16(%1), %%mm2\n"
22321+ " movq 24(%1), %%mm3\n"
22322+ " movq %%mm0, (%2)\n"
22323+ " movq %%mm1, 8(%2)\n"
22324+ " movq %%mm2, 16(%2)\n"
22325+ " movq %%mm3, 24(%2)\n"
22326+ " movq 32(%1), %%mm0\n"
22327+ " movq 40(%1), %%mm1\n"
22328+ " movq 48(%1), %%mm2\n"
22329+ " movq 56(%1), %%mm3\n"
22330+ " movq %%mm0, 32(%2)\n"
22331+ " movq %%mm1, 40(%2)\n"
22332+ " movq %%mm2, 48(%2)\n"
22333+ " movq %%mm3, 56(%2)\n"
22334 ".section .fixup, \"ax\"\n"
22335- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22336+ "3:\n"
22337+
22338+#ifdef CONFIG_PAX_KERNEXEC
22339+ " movl %%cr0, %0\n"
22340+ " movl %0, %%eax\n"
22341+ " andl $0xFFFEFFFF, %%eax\n"
22342+ " movl %%eax, %%cr0\n"
22343+#endif
22344+
22345+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22346+
22347+#ifdef CONFIG_PAX_KERNEXEC
22348+ " movl %0, %%cr0\n"
22349+#endif
22350+
22351 " jmp 2b\n"
22352 ".previous\n"
22353 _ASM_EXTABLE(1b, 3b)
22354- : : "r" (from), "r" (to) : "memory");
22355+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22356
22357 from += 64;
22358 to += 64;
22359@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22360 static void fast_copy_page(void *to, void *from)
22361 {
22362 int i;
22363+ unsigned long cr0;
22364
22365 kernel_fpu_begin();
22366
22367@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22368 * but that is for later. -AV
22369 */
22370 __asm__ __volatile__(
22371- "1: prefetch (%0)\n"
22372- " prefetch 64(%0)\n"
22373- " prefetch 128(%0)\n"
22374- " prefetch 192(%0)\n"
22375- " prefetch 256(%0)\n"
22376+ "1: prefetch (%1)\n"
22377+ " prefetch 64(%1)\n"
22378+ " prefetch 128(%1)\n"
22379+ " prefetch 192(%1)\n"
22380+ " prefetch 256(%1)\n"
22381 "2: \n"
22382 ".section .fixup, \"ax\"\n"
22383- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22384+ "3: \n"
22385+
22386+#ifdef CONFIG_PAX_KERNEXEC
22387+ " movl %%cr0, %0\n"
22388+ " movl %0, %%eax\n"
22389+ " andl $0xFFFEFFFF, %%eax\n"
22390+ " movl %%eax, %%cr0\n"
22391+#endif
22392+
22393+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22394+
22395+#ifdef CONFIG_PAX_KERNEXEC
22396+ " movl %0, %%cr0\n"
22397+#endif
22398+
22399 " jmp 2b\n"
22400 ".previous\n"
22401- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22402+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22403
22404 for (i = 0; i < (4096-320)/64; i++) {
22405 __asm__ __volatile__ (
22406- "1: prefetch 320(%0)\n"
22407- "2: movq (%0), %%mm0\n"
22408- " movntq %%mm0, (%1)\n"
22409- " movq 8(%0), %%mm1\n"
22410- " movntq %%mm1, 8(%1)\n"
22411- " movq 16(%0), %%mm2\n"
22412- " movntq %%mm2, 16(%1)\n"
22413- " movq 24(%0), %%mm3\n"
22414- " movntq %%mm3, 24(%1)\n"
22415- " movq 32(%0), %%mm4\n"
22416- " movntq %%mm4, 32(%1)\n"
22417- " movq 40(%0), %%mm5\n"
22418- " movntq %%mm5, 40(%1)\n"
22419- " movq 48(%0), %%mm6\n"
22420- " movntq %%mm6, 48(%1)\n"
22421- " movq 56(%0), %%mm7\n"
22422- " movntq %%mm7, 56(%1)\n"
22423+ "1: prefetch 320(%1)\n"
22424+ "2: movq (%1), %%mm0\n"
22425+ " movntq %%mm0, (%2)\n"
22426+ " movq 8(%1), %%mm1\n"
22427+ " movntq %%mm1, 8(%2)\n"
22428+ " movq 16(%1), %%mm2\n"
22429+ " movntq %%mm2, 16(%2)\n"
22430+ " movq 24(%1), %%mm3\n"
22431+ " movntq %%mm3, 24(%2)\n"
22432+ " movq 32(%1), %%mm4\n"
22433+ " movntq %%mm4, 32(%2)\n"
22434+ " movq 40(%1), %%mm5\n"
22435+ " movntq %%mm5, 40(%2)\n"
22436+ " movq 48(%1), %%mm6\n"
22437+ " movntq %%mm6, 48(%2)\n"
22438+ " movq 56(%1), %%mm7\n"
22439+ " movntq %%mm7, 56(%2)\n"
22440 ".section .fixup, \"ax\"\n"
22441- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22442+ "3:\n"
22443+
22444+#ifdef CONFIG_PAX_KERNEXEC
22445+ " movl %%cr0, %0\n"
22446+ " movl %0, %%eax\n"
22447+ " andl $0xFFFEFFFF, %%eax\n"
22448+ " movl %%eax, %%cr0\n"
22449+#endif
22450+
22451+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22452+
22453+#ifdef CONFIG_PAX_KERNEXEC
22454+ " movl %0, %%cr0\n"
22455+#endif
22456+
22457 " jmp 2b\n"
22458 ".previous\n"
22459- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22460+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22461
22462 from += 64;
22463 to += 64;
22464@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22465 static void fast_copy_page(void *to, void *from)
22466 {
22467 int i;
22468+ unsigned long cr0;
22469
22470 kernel_fpu_begin();
22471
22472 __asm__ __volatile__ (
22473- "1: prefetch (%0)\n"
22474- " prefetch 64(%0)\n"
22475- " prefetch 128(%0)\n"
22476- " prefetch 192(%0)\n"
22477- " prefetch 256(%0)\n"
22478+ "1: prefetch (%1)\n"
22479+ " prefetch 64(%1)\n"
22480+ " prefetch 128(%1)\n"
22481+ " prefetch 192(%1)\n"
22482+ " prefetch 256(%1)\n"
22483 "2: \n"
22484 ".section .fixup, \"ax\"\n"
22485- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22486+ "3: \n"
22487+
22488+#ifdef CONFIG_PAX_KERNEXEC
22489+ " movl %%cr0, %0\n"
22490+ " movl %0, %%eax\n"
22491+ " andl $0xFFFEFFFF, %%eax\n"
22492+ " movl %%eax, %%cr0\n"
22493+#endif
22494+
22495+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22496+
22497+#ifdef CONFIG_PAX_KERNEXEC
22498+ " movl %0, %%cr0\n"
22499+#endif
22500+
22501 " jmp 2b\n"
22502 ".previous\n"
22503- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22504+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22505
22506 for (i = 0; i < 4096/64; i++) {
22507 __asm__ __volatile__ (
22508- "1: prefetch 320(%0)\n"
22509- "2: movq (%0), %%mm0\n"
22510- " movq 8(%0), %%mm1\n"
22511- " movq 16(%0), %%mm2\n"
22512- " movq 24(%0), %%mm3\n"
22513- " movq %%mm0, (%1)\n"
22514- " movq %%mm1, 8(%1)\n"
22515- " movq %%mm2, 16(%1)\n"
22516- " movq %%mm3, 24(%1)\n"
22517- " movq 32(%0), %%mm0\n"
22518- " movq 40(%0), %%mm1\n"
22519- " movq 48(%0), %%mm2\n"
22520- " movq 56(%0), %%mm3\n"
22521- " movq %%mm0, 32(%1)\n"
22522- " movq %%mm1, 40(%1)\n"
22523- " movq %%mm2, 48(%1)\n"
22524- " movq %%mm3, 56(%1)\n"
22525+ "1: prefetch 320(%1)\n"
22526+ "2: movq (%1), %%mm0\n"
22527+ " movq 8(%1), %%mm1\n"
22528+ " movq 16(%1), %%mm2\n"
22529+ " movq 24(%1), %%mm3\n"
22530+ " movq %%mm0, (%2)\n"
22531+ " movq %%mm1, 8(%2)\n"
22532+ " movq %%mm2, 16(%2)\n"
22533+ " movq %%mm3, 24(%2)\n"
22534+ " movq 32(%1), %%mm0\n"
22535+ " movq 40(%1), %%mm1\n"
22536+ " movq 48(%1), %%mm2\n"
22537+ " movq 56(%1), %%mm3\n"
22538+ " movq %%mm0, 32(%2)\n"
22539+ " movq %%mm1, 40(%2)\n"
22540+ " movq %%mm2, 48(%2)\n"
22541+ " movq %%mm3, 56(%2)\n"
22542 ".section .fixup, \"ax\"\n"
22543- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22544+ "3:\n"
22545+
22546+#ifdef CONFIG_PAX_KERNEXEC
22547+ " movl %%cr0, %0\n"
22548+ " movl %0, %%eax\n"
22549+ " andl $0xFFFEFFFF, %%eax\n"
22550+ " movl %%eax, %%cr0\n"
22551+#endif
22552+
22553+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22554+
22555+#ifdef CONFIG_PAX_KERNEXEC
22556+ " movl %0, %%cr0\n"
22557+#endif
22558+
22559 " jmp 2b\n"
22560 ".previous\n"
22561 _ASM_EXTABLE(1b, 3b)
22562- : : "r" (from), "r" (to) : "memory");
22563+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22564
22565 from += 64;
22566 to += 64;
22567diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22568index 69fa106..adda88b 100644
22569--- a/arch/x86/lib/msr-reg.S
22570+++ b/arch/x86/lib/msr-reg.S
22571@@ -3,6 +3,7 @@
22572 #include <asm/dwarf2.h>
22573 #include <asm/asm.h>
22574 #include <asm/msr.h>
22575+#include <asm/alternative-asm.h>
22576
22577 #ifdef CONFIG_X86_64
22578 /*
22579@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22580 CFI_STARTPROC
22581 pushq_cfi %rbx
22582 pushq_cfi %rbp
22583- movq %rdi, %r10 /* Save pointer */
22584+ movq %rdi, %r9 /* Save pointer */
22585 xorl %r11d, %r11d /* Return value */
22586 movl (%rdi), %eax
22587 movl 4(%rdi), %ecx
22588@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22589 movl 28(%rdi), %edi
22590 CFI_REMEMBER_STATE
22591 1: \op
22592-2: movl %eax, (%r10)
22593+2: movl %eax, (%r9)
22594 movl %r11d, %eax /* Return value */
22595- movl %ecx, 4(%r10)
22596- movl %edx, 8(%r10)
22597- movl %ebx, 12(%r10)
22598- movl %ebp, 20(%r10)
22599- movl %esi, 24(%r10)
22600- movl %edi, 28(%r10)
22601+ movl %ecx, 4(%r9)
22602+ movl %edx, 8(%r9)
22603+ movl %ebx, 12(%r9)
22604+ movl %ebp, 20(%r9)
22605+ movl %esi, 24(%r9)
22606+ movl %edi, 28(%r9)
22607 popq_cfi %rbp
22608 popq_cfi %rbx
22609+ pax_force_retaddr
22610 ret
22611 3:
22612 CFI_RESTORE_STATE
22613diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22614index 36b0d15..d381858 100644
22615--- a/arch/x86/lib/putuser.S
22616+++ b/arch/x86/lib/putuser.S
22617@@ -15,7 +15,9 @@
22618 #include <asm/thread_info.h>
22619 #include <asm/errno.h>
22620 #include <asm/asm.h>
22621-
22622+#include <asm/segment.h>
22623+#include <asm/pgtable.h>
22624+#include <asm/alternative-asm.h>
22625
22626 /*
22627 * __put_user_X
22628@@ -29,52 +31,119 @@
22629 * as they get called from within inline assembly.
22630 */
22631
22632-#define ENTER CFI_STARTPROC ; \
22633- GET_THREAD_INFO(%_ASM_BX)
22634-#define EXIT ret ; \
22635+#define ENTER CFI_STARTPROC
22636+#define EXIT pax_force_retaddr; ret ; \
22637 CFI_ENDPROC
22638
22639+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22640+#define _DEST %_ASM_CX,%_ASM_BX
22641+#else
22642+#define _DEST %_ASM_CX
22643+#endif
22644+
22645+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22646+#define __copyuser_seg gs;
22647+#else
22648+#define __copyuser_seg
22649+#endif
22650+
22651 .text
22652 ENTRY(__put_user_1)
22653 ENTER
22654+
22655+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22656+ GET_THREAD_INFO(%_ASM_BX)
22657 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22658 jae bad_put_user
22659-1: movb %al,(%_ASM_CX)
22660+
22661+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22662+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22663+ cmp %_ASM_BX,%_ASM_CX
22664+ jb 1234f
22665+ xor %ebx,%ebx
22666+1234:
22667+#endif
22668+
22669+#endif
22670+
22671+1: __copyuser_seg movb %al,(_DEST)
22672 xor %eax,%eax
22673 EXIT
22674 ENDPROC(__put_user_1)
22675
22676 ENTRY(__put_user_2)
22677 ENTER
22678+
22679+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22680+ GET_THREAD_INFO(%_ASM_BX)
22681 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22682 sub $1,%_ASM_BX
22683 cmp %_ASM_BX,%_ASM_CX
22684 jae bad_put_user
22685-2: movw %ax,(%_ASM_CX)
22686+
22687+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22688+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22689+ cmp %_ASM_BX,%_ASM_CX
22690+ jb 1234f
22691+ xor %ebx,%ebx
22692+1234:
22693+#endif
22694+
22695+#endif
22696+
22697+2: __copyuser_seg movw %ax,(_DEST)
22698 xor %eax,%eax
22699 EXIT
22700 ENDPROC(__put_user_2)
22701
22702 ENTRY(__put_user_4)
22703 ENTER
22704+
22705+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22706+ GET_THREAD_INFO(%_ASM_BX)
22707 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22708 sub $3,%_ASM_BX
22709 cmp %_ASM_BX,%_ASM_CX
22710 jae bad_put_user
22711-3: movl %eax,(%_ASM_CX)
22712+
22713+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22714+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22715+ cmp %_ASM_BX,%_ASM_CX
22716+ jb 1234f
22717+ xor %ebx,%ebx
22718+1234:
22719+#endif
22720+
22721+#endif
22722+
22723+3: __copyuser_seg movl %eax,(_DEST)
22724 xor %eax,%eax
22725 EXIT
22726 ENDPROC(__put_user_4)
22727
22728 ENTRY(__put_user_8)
22729 ENTER
22730+
22731+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22732+ GET_THREAD_INFO(%_ASM_BX)
22733 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22734 sub $7,%_ASM_BX
22735 cmp %_ASM_BX,%_ASM_CX
22736 jae bad_put_user
22737-4: mov %_ASM_AX,(%_ASM_CX)
22738+
22739+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22740+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22741+ cmp %_ASM_BX,%_ASM_CX
22742+ jb 1234f
22743+ xor %ebx,%ebx
22744+1234:
22745+#endif
22746+
22747+#endif
22748+
22749+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22750 #ifdef CONFIG_X86_32
22751-5: movl %edx,4(%_ASM_CX)
22752+5: __copyuser_seg movl %edx,4(_DEST)
22753 #endif
22754 xor %eax,%eax
22755 EXIT
22756diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22757index 1cad221..de671ee 100644
22758--- a/arch/x86/lib/rwlock.S
22759+++ b/arch/x86/lib/rwlock.S
22760@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22761 FRAME
22762 0: LOCK_PREFIX
22763 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22764+
22765+#ifdef CONFIG_PAX_REFCOUNT
22766+ jno 1234f
22767+ LOCK_PREFIX
22768+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22769+ int $4
22770+1234:
22771+ _ASM_EXTABLE(1234b, 1234b)
22772+#endif
22773+
22774 1: rep; nop
22775 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22776 jne 1b
22777 LOCK_PREFIX
22778 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22779+
22780+#ifdef CONFIG_PAX_REFCOUNT
22781+ jno 1234f
22782+ LOCK_PREFIX
22783+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22784+ int $4
22785+1234:
22786+ _ASM_EXTABLE(1234b, 1234b)
22787+#endif
22788+
22789 jnz 0b
22790 ENDFRAME
22791+ pax_force_retaddr
22792 ret
22793 CFI_ENDPROC
22794 END(__write_lock_failed)
22795@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22796 FRAME
22797 0: LOCK_PREFIX
22798 READ_LOCK_SIZE(inc) (%__lock_ptr)
22799+
22800+#ifdef CONFIG_PAX_REFCOUNT
22801+ jno 1234f
22802+ LOCK_PREFIX
22803+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22804+ int $4
22805+1234:
22806+ _ASM_EXTABLE(1234b, 1234b)
22807+#endif
22808+
22809 1: rep; nop
22810 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22811 js 1b
22812 LOCK_PREFIX
22813 READ_LOCK_SIZE(dec) (%__lock_ptr)
22814+
22815+#ifdef CONFIG_PAX_REFCOUNT
22816+ jno 1234f
22817+ LOCK_PREFIX
22818+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22819+ int $4
22820+1234:
22821+ _ASM_EXTABLE(1234b, 1234b)
22822+#endif
22823+
22824 js 0b
22825 ENDFRAME
22826+ pax_force_retaddr
22827 ret
22828 CFI_ENDPROC
22829 END(__read_lock_failed)
22830diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22831index 5dff5f0..cadebf4 100644
22832--- a/arch/x86/lib/rwsem.S
22833+++ b/arch/x86/lib/rwsem.S
22834@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22835 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22836 CFI_RESTORE __ASM_REG(dx)
22837 restore_common_regs
22838+ pax_force_retaddr
22839 ret
22840 CFI_ENDPROC
22841 ENDPROC(call_rwsem_down_read_failed)
22842@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22843 movq %rax,%rdi
22844 call rwsem_down_write_failed
22845 restore_common_regs
22846+ pax_force_retaddr
22847 ret
22848 CFI_ENDPROC
22849 ENDPROC(call_rwsem_down_write_failed)
22850@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22851 movq %rax,%rdi
22852 call rwsem_wake
22853 restore_common_regs
22854-1: ret
22855+1: pax_force_retaddr
22856+ ret
22857 CFI_ENDPROC
22858 ENDPROC(call_rwsem_wake)
22859
22860@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22861 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22862 CFI_RESTORE __ASM_REG(dx)
22863 restore_common_regs
22864+ pax_force_retaddr
22865 ret
22866 CFI_ENDPROC
22867 ENDPROC(call_rwsem_downgrade_wake)
22868diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22869index a63efd6..ccecad8 100644
22870--- a/arch/x86/lib/thunk_64.S
22871+++ b/arch/x86/lib/thunk_64.S
22872@@ -8,6 +8,7 @@
22873 #include <linux/linkage.h>
22874 #include <asm/dwarf2.h>
22875 #include <asm/calling.h>
22876+#include <asm/alternative-asm.h>
22877
22878 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22879 .macro THUNK name, func, put_ret_addr_in_rdi=0
22880@@ -41,5 +42,6 @@
22881 SAVE_ARGS
22882 restore:
22883 RESTORE_ARGS
22884+ pax_force_retaddr
22885 ret
22886 CFI_ENDPROC
22887diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22888index ef2a6a5..3b28862 100644
22889--- a/arch/x86/lib/usercopy_32.c
22890+++ b/arch/x86/lib/usercopy_32.c
22891@@ -41,10 +41,12 @@ do { \
22892 int __d0; \
22893 might_fault(); \
22894 __asm__ __volatile__( \
22895+ __COPYUSER_SET_ES \
22896 "0: rep; stosl\n" \
22897 " movl %2,%0\n" \
22898 "1: rep; stosb\n" \
22899 "2:\n" \
22900+ __COPYUSER_RESTORE_ES \
22901 ".section .fixup,\"ax\"\n" \
22902 "3: lea 0(%2,%0,4),%0\n" \
22903 " jmp 2b\n" \
22904@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22905 might_fault();
22906
22907 __asm__ __volatile__(
22908+ __COPYUSER_SET_ES
22909 " testl %0, %0\n"
22910 " jz 3f\n"
22911 " andl %0,%%ecx\n"
22912@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22913 " subl %%ecx,%0\n"
22914 " addl %0,%%eax\n"
22915 "1:\n"
22916+ __COPYUSER_RESTORE_ES
22917 ".section .fixup,\"ax\"\n"
22918 "2: xorl %%eax,%%eax\n"
22919 " jmp 1b\n"
22920@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22921
22922 #ifdef CONFIG_X86_INTEL_USERCOPY
22923 static unsigned long
22924-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22925+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22926 {
22927 int d0, d1;
22928 __asm__ __volatile__(
22929@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22930 " .align 2,0x90\n"
22931 "3: movl 0(%4), %%eax\n"
22932 "4: movl 4(%4), %%edx\n"
22933- "5: movl %%eax, 0(%3)\n"
22934- "6: movl %%edx, 4(%3)\n"
22935+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22936+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22937 "7: movl 8(%4), %%eax\n"
22938 "8: movl 12(%4),%%edx\n"
22939- "9: movl %%eax, 8(%3)\n"
22940- "10: movl %%edx, 12(%3)\n"
22941+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22942+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22943 "11: movl 16(%4), %%eax\n"
22944 "12: movl 20(%4), %%edx\n"
22945- "13: movl %%eax, 16(%3)\n"
22946- "14: movl %%edx, 20(%3)\n"
22947+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22948+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22949 "15: movl 24(%4), %%eax\n"
22950 "16: movl 28(%4), %%edx\n"
22951- "17: movl %%eax, 24(%3)\n"
22952- "18: movl %%edx, 28(%3)\n"
22953+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22954+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22955 "19: movl 32(%4), %%eax\n"
22956 "20: movl 36(%4), %%edx\n"
22957- "21: movl %%eax, 32(%3)\n"
22958- "22: movl %%edx, 36(%3)\n"
22959+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22960+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22961 "23: movl 40(%4), %%eax\n"
22962 "24: movl 44(%4), %%edx\n"
22963- "25: movl %%eax, 40(%3)\n"
22964- "26: movl %%edx, 44(%3)\n"
22965+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22966+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22967 "27: movl 48(%4), %%eax\n"
22968 "28: movl 52(%4), %%edx\n"
22969- "29: movl %%eax, 48(%3)\n"
22970- "30: movl %%edx, 52(%3)\n"
22971+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22972+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22973 "31: movl 56(%4), %%eax\n"
22974 "32: movl 60(%4), %%edx\n"
22975- "33: movl %%eax, 56(%3)\n"
22976- "34: movl %%edx, 60(%3)\n"
22977+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22978+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22979 " addl $-64, %0\n"
22980 " addl $64, %4\n"
22981 " addl $64, %3\n"
22982@@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22983 " shrl $2, %0\n"
22984 " andl $3, %%eax\n"
22985 " cld\n"
22986+ __COPYUSER_SET_ES
22987 "99: rep; movsl\n"
22988 "36: movl %%eax, %0\n"
22989 "37: rep; movsb\n"
22990 "100:\n"
22991+ __COPYUSER_RESTORE_ES
22992 ".section .fixup,\"ax\"\n"
22993 "101: lea 0(%%eax,%0,4),%0\n"
22994 " jmp 100b\n"
22995@@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22996 }
22997
22998 static unsigned long
22999+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23000+{
23001+ int d0, d1;
23002+ __asm__ __volatile__(
23003+ " .align 2,0x90\n"
23004+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23005+ " cmpl $67, %0\n"
23006+ " jbe 3f\n"
23007+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23008+ " .align 2,0x90\n"
23009+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23010+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23011+ "5: movl %%eax, 0(%3)\n"
23012+ "6: movl %%edx, 4(%3)\n"
23013+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23014+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23015+ "9: movl %%eax, 8(%3)\n"
23016+ "10: movl %%edx, 12(%3)\n"
23017+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23018+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23019+ "13: movl %%eax, 16(%3)\n"
23020+ "14: movl %%edx, 20(%3)\n"
23021+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23022+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23023+ "17: movl %%eax, 24(%3)\n"
23024+ "18: movl %%edx, 28(%3)\n"
23025+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23026+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23027+ "21: movl %%eax, 32(%3)\n"
23028+ "22: movl %%edx, 36(%3)\n"
23029+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23030+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23031+ "25: movl %%eax, 40(%3)\n"
23032+ "26: movl %%edx, 44(%3)\n"
23033+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23034+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23035+ "29: movl %%eax, 48(%3)\n"
23036+ "30: movl %%edx, 52(%3)\n"
23037+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23038+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23039+ "33: movl %%eax, 56(%3)\n"
23040+ "34: movl %%edx, 60(%3)\n"
23041+ " addl $-64, %0\n"
23042+ " addl $64, %4\n"
23043+ " addl $64, %3\n"
23044+ " cmpl $63, %0\n"
23045+ " ja 1b\n"
23046+ "35: movl %0, %%eax\n"
23047+ " shrl $2, %0\n"
23048+ " andl $3, %%eax\n"
23049+ " cld\n"
23050+ "99: rep; "__copyuser_seg" movsl\n"
23051+ "36: movl %%eax, %0\n"
23052+ "37: rep; "__copyuser_seg" movsb\n"
23053+ "100:\n"
23054+ ".section .fixup,\"ax\"\n"
23055+ "101: lea 0(%%eax,%0,4),%0\n"
23056+ " jmp 100b\n"
23057+ ".previous\n"
23058+ ".section __ex_table,\"a\"\n"
23059+ " .align 4\n"
23060+ " .long 1b,100b\n"
23061+ " .long 2b,100b\n"
23062+ " .long 3b,100b\n"
23063+ " .long 4b,100b\n"
23064+ " .long 5b,100b\n"
23065+ " .long 6b,100b\n"
23066+ " .long 7b,100b\n"
23067+ " .long 8b,100b\n"
23068+ " .long 9b,100b\n"
23069+ " .long 10b,100b\n"
23070+ " .long 11b,100b\n"
23071+ " .long 12b,100b\n"
23072+ " .long 13b,100b\n"
23073+ " .long 14b,100b\n"
23074+ " .long 15b,100b\n"
23075+ " .long 16b,100b\n"
23076+ " .long 17b,100b\n"
23077+ " .long 18b,100b\n"
23078+ " .long 19b,100b\n"
23079+ " .long 20b,100b\n"
23080+ " .long 21b,100b\n"
23081+ " .long 22b,100b\n"
23082+ " .long 23b,100b\n"
23083+ " .long 24b,100b\n"
23084+ " .long 25b,100b\n"
23085+ " .long 26b,100b\n"
23086+ " .long 27b,100b\n"
23087+ " .long 28b,100b\n"
23088+ " .long 29b,100b\n"
23089+ " .long 30b,100b\n"
23090+ " .long 31b,100b\n"
23091+ " .long 32b,100b\n"
23092+ " .long 33b,100b\n"
23093+ " .long 34b,100b\n"
23094+ " .long 35b,100b\n"
23095+ " .long 36b,100b\n"
23096+ " .long 37b,100b\n"
23097+ " .long 99b,101b\n"
23098+ ".previous"
23099+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23100+ : "1"(to), "2"(from), "0"(size)
23101+ : "eax", "edx", "memory");
23102+ return size;
23103+}
23104+
23105+static unsigned long
23106+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23107+static unsigned long
23108 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23109 {
23110 int d0, d1;
23111 __asm__ __volatile__(
23112 " .align 2,0x90\n"
23113- "0: movl 32(%4), %%eax\n"
23114+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23115 " cmpl $67, %0\n"
23116 " jbe 2f\n"
23117- "1: movl 64(%4), %%eax\n"
23118+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23119 " .align 2,0x90\n"
23120- "2: movl 0(%4), %%eax\n"
23121- "21: movl 4(%4), %%edx\n"
23122+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23123+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23124 " movl %%eax, 0(%3)\n"
23125 " movl %%edx, 4(%3)\n"
23126- "3: movl 8(%4), %%eax\n"
23127- "31: movl 12(%4),%%edx\n"
23128+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23129+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23130 " movl %%eax, 8(%3)\n"
23131 " movl %%edx, 12(%3)\n"
23132- "4: movl 16(%4), %%eax\n"
23133- "41: movl 20(%4), %%edx\n"
23134+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23135+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23136 " movl %%eax, 16(%3)\n"
23137 " movl %%edx, 20(%3)\n"
23138- "10: movl 24(%4), %%eax\n"
23139- "51: movl 28(%4), %%edx\n"
23140+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23141+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23142 " movl %%eax, 24(%3)\n"
23143 " movl %%edx, 28(%3)\n"
23144- "11: movl 32(%4), %%eax\n"
23145- "61: movl 36(%4), %%edx\n"
23146+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23147+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23148 " movl %%eax, 32(%3)\n"
23149 " movl %%edx, 36(%3)\n"
23150- "12: movl 40(%4), %%eax\n"
23151- "71: movl 44(%4), %%edx\n"
23152+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23153+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23154 " movl %%eax, 40(%3)\n"
23155 " movl %%edx, 44(%3)\n"
23156- "13: movl 48(%4), %%eax\n"
23157- "81: movl 52(%4), %%edx\n"
23158+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23159+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23160 " movl %%eax, 48(%3)\n"
23161 " movl %%edx, 52(%3)\n"
23162- "14: movl 56(%4), %%eax\n"
23163- "91: movl 60(%4), %%edx\n"
23164+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23165+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23166 " movl %%eax, 56(%3)\n"
23167 " movl %%edx, 60(%3)\n"
23168 " addl $-64, %0\n"
23169@@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23170 " shrl $2, %0\n"
23171 " andl $3, %%eax\n"
23172 " cld\n"
23173- "6: rep; movsl\n"
23174+ "6: rep; "__copyuser_seg" movsl\n"
23175 " movl %%eax,%0\n"
23176- "7: rep; movsb\n"
23177+ "7: rep; "__copyuser_seg" movsb\n"
23178 "8:\n"
23179 ".section .fixup,\"ax\"\n"
23180 "9: lea 0(%%eax,%0,4),%0\n"
23181@@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23182 */
23183
23184 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23185+ const void __user *from, unsigned long size) __size_overflow(3);
23186+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23187 const void __user *from, unsigned long size)
23188 {
23189 int d0, d1;
23190
23191 __asm__ __volatile__(
23192 " .align 2,0x90\n"
23193- "0: movl 32(%4), %%eax\n"
23194+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23195 " cmpl $67, %0\n"
23196 " jbe 2f\n"
23197- "1: movl 64(%4), %%eax\n"
23198+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23199 " .align 2,0x90\n"
23200- "2: movl 0(%4), %%eax\n"
23201- "21: movl 4(%4), %%edx\n"
23202+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23203+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23204 " movnti %%eax, 0(%3)\n"
23205 " movnti %%edx, 4(%3)\n"
23206- "3: movl 8(%4), %%eax\n"
23207- "31: movl 12(%4),%%edx\n"
23208+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23209+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23210 " movnti %%eax, 8(%3)\n"
23211 " movnti %%edx, 12(%3)\n"
23212- "4: movl 16(%4), %%eax\n"
23213- "41: movl 20(%4), %%edx\n"
23214+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23215+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23216 " movnti %%eax, 16(%3)\n"
23217 " movnti %%edx, 20(%3)\n"
23218- "10: movl 24(%4), %%eax\n"
23219- "51: movl 28(%4), %%edx\n"
23220+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23221+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23222 " movnti %%eax, 24(%3)\n"
23223 " movnti %%edx, 28(%3)\n"
23224- "11: movl 32(%4), %%eax\n"
23225- "61: movl 36(%4), %%edx\n"
23226+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23227+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23228 " movnti %%eax, 32(%3)\n"
23229 " movnti %%edx, 36(%3)\n"
23230- "12: movl 40(%4), %%eax\n"
23231- "71: movl 44(%4), %%edx\n"
23232+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23233+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23234 " movnti %%eax, 40(%3)\n"
23235 " movnti %%edx, 44(%3)\n"
23236- "13: movl 48(%4), %%eax\n"
23237- "81: movl 52(%4), %%edx\n"
23238+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23239+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23240 " movnti %%eax, 48(%3)\n"
23241 " movnti %%edx, 52(%3)\n"
23242- "14: movl 56(%4), %%eax\n"
23243- "91: movl 60(%4), %%edx\n"
23244+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23245+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23246 " movnti %%eax, 56(%3)\n"
23247 " movnti %%edx, 60(%3)\n"
23248 " addl $-64, %0\n"
23249@@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23250 " shrl $2, %0\n"
23251 " andl $3, %%eax\n"
23252 " cld\n"
23253- "6: rep; movsl\n"
23254+ "6: rep; "__copyuser_seg" movsl\n"
23255 " movl %%eax,%0\n"
23256- "7: rep; movsb\n"
23257+ "7: rep; "__copyuser_seg" movsb\n"
23258 "8:\n"
23259 ".section .fixup,\"ax\"\n"
23260 "9: lea 0(%%eax,%0,4),%0\n"
23261@@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23262 }
23263
23264 static unsigned long __copy_user_intel_nocache(void *to,
23265+ const void __user *from, unsigned long size) __size_overflow(3);
23266+static unsigned long __copy_user_intel_nocache(void *to,
23267 const void __user *from, unsigned long size)
23268 {
23269 int d0, d1;
23270
23271 __asm__ __volatile__(
23272 " .align 2,0x90\n"
23273- "0: movl 32(%4), %%eax\n"
23274+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23275 " cmpl $67, %0\n"
23276 " jbe 2f\n"
23277- "1: movl 64(%4), %%eax\n"
23278+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23279 " .align 2,0x90\n"
23280- "2: movl 0(%4), %%eax\n"
23281- "21: movl 4(%4), %%edx\n"
23282+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23283+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23284 " movnti %%eax, 0(%3)\n"
23285 " movnti %%edx, 4(%3)\n"
23286- "3: movl 8(%4), %%eax\n"
23287- "31: movl 12(%4),%%edx\n"
23288+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23289+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23290 " movnti %%eax, 8(%3)\n"
23291 " movnti %%edx, 12(%3)\n"
23292- "4: movl 16(%4), %%eax\n"
23293- "41: movl 20(%4), %%edx\n"
23294+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23295+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23296 " movnti %%eax, 16(%3)\n"
23297 " movnti %%edx, 20(%3)\n"
23298- "10: movl 24(%4), %%eax\n"
23299- "51: movl 28(%4), %%edx\n"
23300+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23301+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23302 " movnti %%eax, 24(%3)\n"
23303 " movnti %%edx, 28(%3)\n"
23304- "11: movl 32(%4), %%eax\n"
23305- "61: movl 36(%4), %%edx\n"
23306+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23307+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23308 " movnti %%eax, 32(%3)\n"
23309 " movnti %%edx, 36(%3)\n"
23310- "12: movl 40(%4), %%eax\n"
23311- "71: movl 44(%4), %%edx\n"
23312+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23313+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23314 " movnti %%eax, 40(%3)\n"
23315 " movnti %%edx, 44(%3)\n"
23316- "13: movl 48(%4), %%eax\n"
23317- "81: movl 52(%4), %%edx\n"
23318+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23319+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23320 " movnti %%eax, 48(%3)\n"
23321 " movnti %%edx, 52(%3)\n"
23322- "14: movl 56(%4), %%eax\n"
23323- "91: movl 60(%4), %%edx\n"
23324+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23325+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23326 " movnti %%eax, 56(%3)\n"
23327 " movnti %%edx, 60(%3)\n"
23328 " addl $-64, %0\n"
23329@@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23330 " shrl $2, %0\n"
23331 " andl $3, %%eax\n"
23332 " cld\n"
23333- "6: rep; movsl\n"
23334+ "6: rep; "__copyuser_seg" movsl\n"
23335 " movl %%eax,%0\n"
23336- "7: rep; movsb\n"
23337+ "7: rep; "__copyuser_seg" movsb\n"
23338 "8:\n"
23339 ".section .fixup,\"ax\"\n"
23340 "9: lea 0(%%eax,%0,4),%0\n"
23341@@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23342 */
23343 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23344 unsigned long size);
23345-unsigned long __copy_user_intel(void __user *to, const void *from,
23346+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23347+ unsigned long size);
23348+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23349 unsigned long size);
23350 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23351 const void __user *from, unsigned long size);
23352 #endif /* CONFIG_X86_INTEL_USERCOPY */
23353
23354 /* Generic arbitrary sized copy. */
23355-#define __copy_user(to, from, size) \
23356+#define __copy_user(to, from, size, prefix, set, restore) \
23357 do { \
23358 int __d0, __d1, __d2; \
23359 __asm__ __volatile__( \
23360+ set \
23361 " cmp $7,%0\n" \
23362 " jbe 1f\n" \
23363 " movl %1,%0\n" \
23364 " negl %0\n" \
23365 " andl $7,%0\n" \
23366 " subl %0,%3\n" \
23367- "4: rep; movsb\n" \
23368+ "4: rep; "prefix"movsb\n" \
23369 " movl %3,%0\n" \
23370 " shrl $2,%0\n" \
23371 " andl $3,%3\n" \
23372 " .align 2,0x90\n" \
23373- "0: rep; movsl\n" \
23374+ "0: rep; "prefix"movsl\n" \
23375 " movl %3,%0\n" \
23376- "1: rep; movsb\n" \
23377+ "1: rep; "prefix"movsb\n" \
23378 "2:\n" \
23379+ restore \
23380 ".section .fixup,\"ax\"\n" \
23381 "5: addl %3,%0\n" \
23382 " jmp 2b\n" \
23383@@ -595,14 +718,14 @@ do { \
23384 " negl %0\n" \
23385 " andl $7,%0\n" \
23386 " subl %0,%3\n" \
23387- "4: rep; movsb\n" \
23388+ "4: rep; "__copyuser_seg"movsb\n" \
23389 " movl %3,%0\n" \
23390 " shrl $2,%0\n" \
23391 " andl $3,%3\n" \
23392 " .align 2,0x90\n" \
23393- "0: rep; movsl\n" \
23394+ "0: rep; "__copyuser_seg"movsl\n" \
23395 " movl %3,%0\n" \
23396- "1: rep; movsb\n" \
23397+ "1: rep; "__copyuser_seg"movsb\n" \
23398 "2:\n" \
23399 ".section .fixup,\"ax\"\n" \
23400 "5: addl %3,%0\n" \
23401@@ -688,9 +811,9 @@ survive:
23402 }
23403 #endif
23404 if (movsl_is_ok(to, from, n))
23405- __copy_user(to, from, n);
23406+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23407 else
23408- n = __copy_user_intel(to, from, n);
23409+ n = __generic_copy_to_user_intel(to, from, n);
23410 return n;
23411 }
23412 EXPORT_SYMBOL(__copy_to_user_ll);
23413@@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23414 unsigned long n)
23415 {
23416 if (movsl_is_ok(to, from, n))
23417- __copy_user(to, from, n);
23418+ __copy_user(to, from, n, __copyuser_seg, "", "");
23419 else
23420- n = __copy_user_intel((void __user *)to,
23421- (const void *)from, n);
23422+ n = __generic_copy_from_user_intel(to, from, n);
23423 return n;
23424 }
23425 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23426@@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23427 if (n > 64 && cpu_has_xmm2)
23428 n = __copy_user_intel_nocache(to, from, n);
23429 else
23430- __copy_user(to, from, n);
23431+ __copy_user(to, from, n, __copyuser_seg, "", "");
23432 #else
23433- __copy_user(to, from, n);
23434+ __copy_user(to, from, n, __copyuser_seg, "", "");
23435 #endif
23436 return n;
23437 }
23438 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23439
23440-/**
23441- * copy_to_user: - Copy a block of data into user space.
23442- * @to: Destination address, in user space.
23443- * @from: Source address, in kernel space.
23444- * @n: Number of bytes to copy.
23445- *
23446- * Context: User context only. This function may sleep.
23447- *
23448- * Copy data from kernel space to user space.
23449- *
23450- * Returns number of bytes that could not be copied.
23451- * On success, this will be zero.
23452- */
23453-unsigned long
23454-copy_to_user(void __user *to, const void *from, unsigned long n)
23455-{
23456- if (access_ok(VERIFY_WRITE, to, n))
23457- n = __copy_to_user(to, from, n);
23458- return n;
23459-}
23460-EXPORT_SYMBOL(copy_to_user);
23461-
23462-/**
23463- * copy_from_user: - Copy a block of data from user space.
23464- * @to: Destination address, in kernel space.
23465- * @from: Source address, in user space.
23466- * @n: Number of bytes to copy.
23467- *
23468- * Context: User context only. This function may sleep.
23469- *
23470- * Copy data from user space to kernel space.
23471- *
23472- * Returns number of bytes that could not be copied.
23473- * On success, this will be zero.
23474- *
23475- * If some data could not be copied, this function will pad the copied
23476- * data to the requested size using zero bytes.
23477- */
23478-unsigned long
23479-_copy_from_user(void *to, const void __user *from, unsigned long n)
23480-{
23481- if (access_ok(VERIFY_READ, from, n))
23482- n = __copy_from_user(to, from, n);
23483- else
23484- memset(to, 0, n);
23485- return n;
23486-}
23487-EXPORT_SYMBOL(_copy_from_user);
23488-
23489 void copy_from_user_overflow(void)
23490 {
23491 WARN(1, "Buffer overflow detected!\n");
23492 }
23493 EXPORT_SYMBOL(copy_from_user_overflow);
23494+
23495+void copy_to_user_overflow(void)
23496+{
23497+ WARN(1, "Buffer overflow detected!\n");
23498+}
23499+EXPORT_SYMBOL(copy_to_user_overflow);
23500+
23501+#ifdef CONFIG_PAX_MEMORY_UDEREF
23502+void __set_fs(mm_segment_t x)
23503+{
23504+ switch (x.seg) {
23505+ case 0:
23506+ loadsegment(gs, 0);
23507+ break;
23508+ case TASK_SIZE_MAX:
23509+ loadsegment(gs, __USER_DS);
23510+ break;
23511+ case -1UL:
23512+ loadsegment(gs, __KERNEL_DS);
23513+ break;
23514+ default:
23515+ BUG();
23516+ }
23517+ return;
23518+}
23519+EXPORT_SYMBOL(__set_fs);
23520+
23521+void set_fs(mm_segment_t x)
23522+{
23523+ current_thread_info()->addr_limit = x;
23524+ __set_fs(x);
23525+}
23526+EXPORT_SYMBOL(set_fs);
23527+#endif
23528diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23529index 0d0326f..6a6155b 100644
23530--- a/arch/x86/lib/usercopy_64.c
23531+++ b/arch/x86/lib/usercopy_64.c
23532@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23533 {
23534 long __d0;
23535 might_fault();
23536+
23537+#ifdef CONFIG_PAX_MEMORY_UDEREF
23538+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23539+ addr += PAX_USER_SHADOW_BASE;
23540+#endif
23541+
23542 /* no memory constraint because it doesn't change any memory gcc knows
23543 about */
23544 asm volatile(
23545@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23546 }
23547 EXPORT_SYMBOL(strlen_user);
23548
23549-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23550+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23551 {
23552- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23553- return copy_user_generic((__force void *)to, (__force void *)from, len);
23554- }
23555- return len;
23556+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23557+
23558+#ifdef CONFIG_PAX_MEMORY_UDEREF
23559+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23560+ to += PAX_USER_SHADOW_BASE;
23561+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23562+ from += PAX_USER_SHADOW_BASE;
23563+#endif
23564+
23565+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23566+ }
23567+ return len;
23568 }
23569 EXPORT_SYMBOL(copy_in_user);
23570
23571@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23572 * it is not necessary to optimize tail handling.
23573 */
23574 unsigned long
23575-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23576+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23577 {
23578 char c;
23579 unsigned zero_len;
23580@@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23581 break;
23582 return len;
23583 }
23584+
23585+void copy_from_user_overflow(void)
23586+{
23587+ WARN(1, "Buffer overflow detected!\n");
23588+}
23589+EXPORT_SYMBOL(copy_from_user_overflow);
23590+
23591+void copy_to_user_overflow(void)
23592+{
23593+ WARN(1, "Buffer overflow detected!\n");
23594+}
23595+EXPORT_SYMBOL(copy_to_user_overflow);
23596diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23597index 1fb85db..8b3540b 100644
23598--- a/arch/x86/mm/extable.c
23599+++ b/arch/x86/mm/extable.c
23600@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23601 const struct exception_table_entry *fixup;
23602
23603 #ifdef CONFIG_PNPBIOS
23604- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23605+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23606 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23607 extern u32 pnp_bios_is_utter_crap;
23608 pnp_bios_is_utter_crap = 1;
23609diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23610index 3ecfd1a..304d554 100644
23611--- a/arch/x86/mm/fault.c
23612+++ b/arch/x86/mm/fault.c
23613@@ -13,11 +13,18 @@
23614 #include <linux/perf_event.h> /* perf_sw_event */
23615 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23616 #include <linux/prefetch.h> /* prefetchw */
23617+#include <linux/unistd.h>
23618+#include <linux/compiler.h>
23619
23620 #include <asm/traps.h> /* dotraplinkage, ... */
23621 #include <asm/pgalloc.h> /* pgd_*(), ... */
23622 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23623 #include <asm/fixmap.h> /* VSYSCALL_START */
23624+#include <asm/tlbflush.h>
23625+
23626+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23627+#include <asm/stacktrace.h>
23628+#endif
23629
23630 /*
23631 * Page fault error code bits:
23632@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23633 int ret = 0;
23634
23635 /* kprobe_running() needs smp_processor_id() */
23636- if (kprobes_built_in() && !user_mode_vm(regs)) {
23637+ if (kprobes_built_in() && !user_mode(regs)) {
23638 preempt_disable();
23639 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23640 ret = 1;
23641@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23642 return !instr_lo || (instr_lo>>1) == 1;
23643 case 0x00:
23644 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23645- if (probe_kernel_address(instr, opcode))
23646+ if (user_mode(regs)) {
23647+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23648+ return 0;
23649+ } else if (probe_kernel_address(instr, opcode))
23650 return 0;
23651
23652 *prefetch = (instr_lo == 0xF) &&
23653@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23654 while (instr < max_instr) {
23655 unsigned char opcode;
23656
23657- if (probe_kernel_address(instr, opcode))
23658+ if (user_mode(regs)) {
23659+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23660+ break;
23661+ } else if (probe_kernel_address(instr, opcode))
23662 break;
23663
23664 instr++;
23665@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23666 force_sig_info(si_signo, &info, tsk);
23667 }
23668
23669+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23670+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23671+#endif
23672+
23673+#ifdef CONFIG_PAX_EMUTRAMP
23674+static int pax_handle_fetch_fault(struct pt_regs *regs);
23675+#endif
23676+
23677+#ifdef CONFIG_PAX_PAGEEXEC
23678+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23679+{
23680+ pgd_t *pgd;
23681+ pud_t *pud;
23682+ pmd_t *pmd;
23683+
23684+ pgd = pgd_offset(mm, address);
23685+ if (!pgd_present(*pgd))
23686+ return NULL;
23687+ pud = pud_offset(pgd, address);
23688+ if (!pud_present(*pud))
23689+ return NULL;
23690+ pmd = pmd_offset(pud, address);
23691+ if (!pmd_present(*pmd))
23692+ return NULL;
23693+ return pmd;
23694+}
23695+#endif
23696+
23697 DEFINE_SPINLOCK(pgd_lock);
23698 LIST_HEAD(pgd_list);
23699
23700@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23701 for (address = VMALLOC_START & PMD_MASK;
23702 address >= TASK_SIZE && address < FIXADDR_TOP;
23703 address += PMD_SIZE) {
23704+
23705+#ifdef CONFIG_PAX_PER_CPU_PGD
23706+ unsigned long cpu;
23707+#else
23708 struct page *page;
23709+#endif
23710
23711 spin_lock(&pgd_lock);
23712+
23713+#ifdef CONFIG_PAX_PER_CPU_PGD
23714+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23715+ pgd_t *pgd = get_cpu_pgd(cpu);
23716+ pmd_t *ret;
23717+#else
23718 list_for_each_entry(page, &pgd_list, lru) {
23719+ pgd_t *pgd = page_address(page);
23720 spinlock_t *pgt_lock;
23721 pmd_t *ret;
23722
23723@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23724 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23725
23726 spin_lock(pgt_lock);
23727- ret = vmalloc_sync_one(page_address(page), address);
23728+#endif
23729+
23730+ ret = vmalloc_sync_one(pgd, address);
23731+
23732+#ifndef CONFIG_PAX_PER_CPU_PGD
23733 spin_unlock(pgt_lock);
23734+#endif
23735
23736 if (!ret)
23737 break;
23738@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23739 * an interrupt in the middle of a task switch..
23740 */
23741 pgd_paddr = read_cr3();
23742+
23743+#ifdef CONFIG_PAX_PER_CPU_PGD
23744+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23745+#endif
23746+
23747 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23748 if (!pmd_k)
23749 return -1;
23750@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23751 * happen within a race in page table update. In the later
23752 * case just flush:
23753 */
23754+
23755+#ifdef CONFIG_PAX_PER_CPU_PGD
23756+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23757+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23758+#else
23759 pgd = pgd_offset(current->active_mm, address);
23760+#endif
23761+
23762 pgd_ref = pgd_offset_k(address);
23763 if (pgd_none(*pgd_ref))
23764 return -1;
23765@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23766 static int is_errata100(struct pt_regs *regs, unsigned long address)
23767 {
23768 #ifdef CONFIG_X86_64
23769- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23770+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23771 return 1;
23772 #endif
23773 return 0;
23774@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23775 }
23776
23777 static const char nx_warning[] = KERN_CRIT
23778-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23779+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23780
23781 static void
23782 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23783@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23784 if (!oops_may_print())
23785 return;
23786
23787- if (error_code & PF_INSTR) {
23788+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23789 unsigned int level;
23790
23791 pte_t *pte = lookup_address(address, &level);
23792
23793 if (pte && pte_present(*pte) && !pte_exec(*pte))
23794- printk(nx_warning, current_uid());
23795+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23796 }
23797
23798+#ifdef CONFIG_PAX_KERNEXEC
23799+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23800+ if (current->signal->curr_ip)
23801+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23802+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23803+ else
23804+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23805+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23806+ }
23807+#endif
23808+
23809 printk(KERN_ALERT "BUG: unable to handle kernel ");
23810 if (address < PAGE_SIZE)
23811 printk(KERN_CONT "NULL pointer dereference");
23812@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23813 }
23814 #endif
23815
23816+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23817+ if (pax_is_fetch_fault(regs, error_code, address)) {
23818+
23819+#ifdef CONFIG_PAX_EMUTRAMP
23820+ switch (pax_handle_fetch_fault(regs)) {
23821+ case 2:
23822+ return;
23823+ }
23824+#endif
23825+
23826+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23827+ do_group_exit(SIGKILL);
23828+ }
23829+#endif
23830+
23831 if (unlikely(show_unhandled_signals))
23832 show_signal_msg(regs, error_code, address, tsk);
23833
23834@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23835 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23836 printk(KERN_ERR
23837 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23838- tsk->comm, tsk->pid, address);
23839+ tsk->comm, task_pid_nr(tsk), address);
23840 code = BUS_MCEERR_AR;
23841 }
23842 #endif
23843@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23844 return 1;
23845 }
23846
23847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23848+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23849+{
23850+ pte_t *pte;
23851+ pmd_t *pmd;
23852+ spinlock_t *ptl;
23853+ unsigned char pte_mask;
23854+
23855+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23856+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23857+ return 0;
23858+
23859+ /* PaX: it's our fault, let's handle it if we can */
23860+
23861+ /* PaX: take a look at read faults before acquiring any locks */
23862+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23863+ /* instruction fetch attempt from a protected page in user mode */
23864+ up_read(&mm->mmap_sem);
23865+
23866+#ifdef CONFIG_PAX_EMUTRAMP
23867+ switch (pax_handle_fetch_fault(regs)) {
23868+ case 2:
23869+ return 1;
23870+ }
23871+#endif
23872+
23873+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23874+ do_group_exit(SIGKILL);
23875+ }
23876+
23877+ pmd = pax_get_pmd(mm, address);
23878+ if (unlikely(!pmd))
23879+ return 0;
23880+
23881+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23882+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23883+ pte_unmap_unlock(pte, ptl);
23884+ return 0;
23885+ }
23886+
23887+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23888+ /* write attempt to a protected page in user mode */
23889+ pte_unmap_unlock(pte, ptl);
23890+ return 0;
23891+ }
23892+
23893+#ifdef CONFIG_SMP
23894+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23895+#else
23896+ if (likely(address > get_limit(regs->cs)))
23897+#endif
23898+ {
23899+ set_pte(pte, pte_mkread(*pte));
23900+ __flush_tlb_one(address);
23901+ pte_unmap_unlock(pte, ptl);
23902+ up_read(&mm->mmap_sem);
23903+ return 1;
23904+ }
23905+
23906+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23907+
23908+ /*
23909+ * PaX: fill DTLB with user rights and retry
23910+ */
23911+ __asm__ __volatile__ (
23912+ "orb %2,(%1)\n"
23913+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23914+/*
23915+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23916+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23917+ * page fault when examined during a TLB load attempt. this is true not only
23918+ * for PTEs holding a non-present entry but also present entries that will
23919+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23920+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23921+ * for our target pages since their PTEs are simply not in the TLBs at all.
23922+
23923+ * the best thing in omitting it is that we gain around 15-20% speed in the
23924+ * fast path of the page fault handler and can get rid of tracing since we
23925+ * can no longer flush unintended entries.
23926+ */
23927+ "invlpg (%0)\n"
23928+#endif
23929+ __copyuser_seg"testb $0,(%0)\n"
23930+ "xorb %3,(%1)\n"
23931+ :
23932+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23933+ : "memory", "cc");
23934+ pte_unmap_unlock(pte, ptl);
23935+ up_read(&mm->mmap_sem);
23936+ return 1;
23937+}
23938+#endif
23939+
23940 /*
23941 * Handle a spurious fault caused by a stale TLB entry.
23942 *
23943@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23944 static inline int
23945 access_error(unsigned long error_code, struct vm_area_struct *vma)
23946 {
23947+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23948+ return 1;
23949+
23950 if (error_code & PF_WRITE) {
23951 /* write, present and write, not present: */
23952 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23953@@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23954 {
23955 struct vm_area_struct *vma;
23956 struct task_struct *tsk;
23957- unsigned long address;
23958 struct mm_struct *mm;
23959 int fault;
23960 int write = error_code & PF_WRITE;
23961 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23962 (write ? FAULT_FLAG_WRITE : 0);
23963
23964- tsk = current;
23965- mm = tsk->mm;
23966-
23967 /* Get the faulting address: */
23968- address = read_cr2();
23969+ unsigned long address = read_cr2();
23970+
23971+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23972+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23973+ if (!search_exception_tables(regs->ip)) {
23974+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23975+ bad_area_nosemaphore(regs, error_code, address);
23976+ return;
23977+ }
23978+ if (address < PAX_USER_SHADOW_BASE) {
23979+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23980+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23981+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23982+ } else
23983+ address -= PAX_USER_SHADOW_BASE;
23984+ }
23985+#endif
23986+
23987+ tsk = current;
23988+ mm = tsk->mm;
23989
23990 /*
23991 * Detect and handle instructions that would cause a page fault for
23992@@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23993 * User-mode registers count as a user access even for any
23994 * potential system fault or CPU buglet:
23995 */
23996- if (user_mode_vm(regs)) {
23997+ if (user_mode(regs)) {
23998 local_irq_enable();
23999 error_code |= PF_USER;
24000 } else {
24001@@ -1132,6 +1339,11 @@ retry:
24002 might_sleep();
24003 }
24004
24005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24006+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24007+ return;
24008+#endif
24009+
24010 vma = find_vma(mm, address);
24011 if (unlikely(!vma)) {
24012 bad_area(regs, error_code, address);
24013@@ -1143,18 +1355,24 @@ retry:
24014 bad_area(regs, error_code, address);
24015 return;
24016 }
24017- if (error_code & PF_USER) {
24018- /*
24019- * Accessing the stack below %sp is always a bug.
24020- * The large cushion allows instructions like enter
24021- * and pusha to work. ("enter $65535, $31" pushes
24022- * 32 pointers and then decrements %sp by 65535.)
24023- */
24024- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24025- bad_area(regs, error_code, address);
24026- return;
24027- }
24028+ /*
24029+ * Accessing the stack below %sp is always a bug.
24030+ * The large cushion allows instructions like enter
24031+ * and pusha to work. ("enter $65535, $31" pushes
24032+ * 32 pointers and then decrements %sp by 65535.)
24033+ */
24034+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24035+ bad_area(regs, error_code, address);
24036+ return;
24037 }
24038+
24039+#ifdef CONFIG_PAX_SEGMEXEC
24040+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24041+ bad_area(regs, error_code, address);
24042+ return;
24043+ }
24044+#endif
24045+
24046 if (unlikely(expand_stack(vma, address))) {
24047 bad_area(regs, error_code, address);
24048 return;
24049@@ -1209,3 +1427,292 @@ good_area:
24050
24051 up_read(&mm->mmap_sem);
24052 }
24053+
24054+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24055+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24056+{
24057+ struct mm_struct *mm = current->mm;
24058+ unsigned long ip = regs->ip;
24059+
24060+ if (v8086_mode(regs))
24061+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24062+
24063+#ifdef CONFIG_PAX_PAGEEXEC
24064+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24065+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24066+ return true;
24067+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24068+ return true;
24069+ return false;
24070+ }
24071+#endif
24072+
24073+#ifdef CONFIG_PAX_SEGMEXEC
24074+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24075+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24076+ return true;
24077+ return false;
24078+ }
24079+#endif
24080+
24081+ return false;
24082+}
24083+#endif
24084+
24085+#ifdef CONFIG_PAX_EMUTRAMP
24086+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24087+{
24088+ int err;
24089+
24090+ do { /* PaX: libffi trampoline emulation */
24091+ unsigned char mov, jmp;
24092+ unsigned int addr1, addr2;
24093+
24094+#ifdef CONFIG_X86_64
24095+ if ((regs->ip + 9) >> 32)
24096+ break;
24097+#endif
24098+
24099+ err = get_user(mov, (unsigned char __user *)regs->ip);
24100+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24101+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24102+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24103+
24104+ if (err)
24105+ break;
24106+
24107+ if (mov == 0xB8 && jmp == 0xE9) {
24108+ regs->ax = addr1;
24109+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24110+ return 2;
24111+ }
24112+ } while (0);
24113+
24114+ do { /* PaX: gcc trampoline emulation #1 */
24115+ unsigned char mov1, mov2;
24116+ unsigned short jmp;
24117+ unsigned int addr1, addr2;
24118+
24119+#ifdef CONFIG_X86_64
24120+ if ((regs->ip + 11) >> 32)
24121+ break;
24122+#endif
24123+
24124+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24125+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24126+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24127+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24128+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24129+
24130+ if (err)
24131+ break;
24132+
24133+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24134+ regs->cx = addr1;
24135+ regs->ax = addr2;
24136+ regs->ip = addr2;
24137+ return 2;
24138+ }
24139+ } while (0);
24140+
24141+ do { /* PaX: gcc trampoline emulation #2 */
24142+ unsigned char mov, jmp;
24143+ unsigned int addr1, addr2;
24144+
24145+#ifdef CONFIG_X86_64
24146+ if ((regs->ip + 9) >> 32)
24147+ break;
24148+#endif
24149+
24150+ err = get_user(mov, (unsigned char __user *)regs->ip);
24151+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24152+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24153+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24154+
24155+ if (err)
24156+ break;
24157+
24158+ if (mov == 0xB9 && jmp == 0xE9) {
24159+ regs->cx = addr1;
24160+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24161+ return 2;
24162+ }
24163+ } while (0);
24164+
24165+ return 1; /* PaX in action */
24166+}
24167+
24168+#ifdef CONFIG_X86_64
24169+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24170+{
24171+ int err;
24172+
24173+ do { /* PaX: libffi trampoline emulation */
24174+ unsigned short mov1, mov2, jmp1;
24175+ unsigned char stcclc, jmp2;
24176+ unsigned long addr1, addr2;
24177+
24178+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24179+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24180+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24181+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24182+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24183+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24184+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24185+
24186+ if (err)
24187+ break;
24188+
24189+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24190+ regs->r11 = addr1;
24191+ regs->r10 = addr2;
24192+ if (stcclc == 0xF8)
24193+ regs->flags &= ~X86_EFLAGS_CF;
24194+ else
24195+ regs->flags |= X86_EFLAGS_CF;
24196+ regs->ip = addr1;
24197+ return 2;
24198+ }
24199+ } while (0);
24200+
24201+ do { /* PaX: gcc trampoline emulation #1 */
24202+ unsigned short mov1, mov2, jmp1;
24203+ unsigned char jmp2;
24204+ unsigned int addr1;
24205+ unsigned long addr2;
24206+
24207+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24208+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24209+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24210+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24211+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24212+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24213+
24214+ if (err)
24215+ break;
24216+
24217+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24218+ regs->r11 = addr1;
24219+ regs->r10 = addr2;
24220+ regs->ip = addr1;
24221+ return 2;
24222+ }
24223+ } while (0);
24224+
24225+ do { /* PaX: gcc trampoline emulation #2 */
24226+ unsigned short mov1, mov2, jmp1;
24227+ unsigned char jmp2;
24228+ unsigned long addr1, addr2;
24229+
24230+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24231+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24232+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24233+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24234+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24235+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24236+
24237+ if (err)
24238+ break;
24239+
24240+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24241+ regs->r11 = addr1;
24242+ regs->r10 = addr2;
24243+ regs->ip = addr1;
24244+ return 2;
24245+ }
24246+ } while (0);
24247+
24248+ return 1; /* PaX in action */
24249+}
24250+#endif
24251+
24252+/*
24253+ * PaX: decide what to do with offenders (regs->ip = fault address)
24254+ *
24255+ * returns 1 when task should be killed
24256+ * 2 when gcc trampoline was detected
24257+ */
24258+static int pax_handle_fetch_fault(struct pt_regs *regs)
24259+{
24260+ if (v8086_mode(regs))
24261+ return 1;
24262+
24263+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24264+ return 1;
24265+
24266+#ifdef CONFIG_X86_32
24267+ return pax_handle_fetch_fault_32(regs);
24268+#else
24269+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24270+ return pax_handle_fetch_fault_32(regs);
24271+ else
24272+ return pax_handle_fetch_fault_64(regs);
24273+#endif
24274+}
24275+#endif
24276+
24277+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24278+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24279+{
24280+ long i;
24281+
24282+ printk(KERN_ERR "PAX: bytes at PC: ");
24283+ for (i = 0; i < 20; i++) {
24284+ unsigned char c;
24285+ if (get_user(c, (unsigned char __force_user *)pc+i))
24286+ printk(KERN_CONT "?? ");
24287+ else
24288+ printk(KERN_CONT "%02x ", c);
24289+ }
24290+ printk("\n");
24291+
24292+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24293+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24294+ unsigned long c;
24295+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24296+#ifdef CONFIG_X86_32
24297+ printk(KERN_CONT "???????? ");
24298+#else
24299+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24300+ printk(KERN_CONT "???????? ???????? ");
24301+ else
24302+ printk(KERN_CONT "???????????????? ");
24303+#endif
24304+ } else {
24305+#ifdef CONFIG_X86_64
24306+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24307+ printk(KERN_CONT "%08x ", (unsigned int)c);
24308+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24309+ } else
24310+#endif
24311+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24312+ }
24313+ }
24314+ printk("\n");
24315+}
24316+#endif
24317+
24318+/**
24319+ * probe_kernel_write(): safely attempt to write to a location
24320+ * @dst: address to write to
24321+ * @src: pointer to the data that shall be written
24322+ * @size: size of the data chunk
24323+ *
24324+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24325+ * happens, handle that and return -EFAULT.
24326+ */
24327+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24328+{
24329+ long ret;
24330+ mm_segment_t old_fs = get_fs();
24331+
24332+ set_fs(KERNEL_DS);
24333+ pagefault_disable();
24334+ pax_open_kernel();
24335+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24336+ pax_close_kernel();
24337+ pagefault_enable();
24338+ set_fs(old_fs);
24339+
24340+ return ret ? -EFAULT : 0;
24341+}
24342diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24343index dd74e46..7d26398 100644
24344--- a/arch/x86/mm/gup.c
24345+++ b/arch/x86/mm/gup.c
24346@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24347 addr = start;
24348 len = (unsigned long) nr_pages << PAGE_SHIFT;
24349 end = start + len;
24350- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24351+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24352 (void __user *)start, len)))
24353 return 0;
24354
24355diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24356index 6f31ee5..8ee4164 100644
24357--- a/arch/x86/mm/highmem_32.c
24358+++ b/arch/x86/mm/highmem_32.c
24359@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24360 idx = type + KM_TYPE_NR*smp_processor_id();
24361 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24362 BUG_ON(!pte_none(*(kmap_pte-idx)));
24363+
24364+ pax_open_kernel();
24365 set_pte(kmap_pte-idx, mk_pte(page, prot));
24366+ pax_close_kernel();
24367+
24368 arch_flush_lazy_mmu_mode();
24369
24370 return (void *)vaddr;
24371diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24372index f6679a7..8f795a3 100644
24373--- a/arch/x86/mm/hugetlbpage.c
24374+++ b/arch/x86/mm/hugetlbpage.c
24375@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24376 struct hstate *h = hstate_file(file);
24377 struct mm_struct *mm = current->mm;
24378 struct vm_area_struct *vma;
24379- unsigned long start_addr;
24380+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24381+
24382+#ifdef CONFIG_PAX_SEGMEXEC
24383+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24384+ pax_task_size = SEGMEXEC_TASK_SIZE;
24385+#endif
24386+
24387+ pax_task_size -= PAGE_SIZE;
24388
24389 if (len > mm->cached_hole_size) {
24390- start_addr = mm->free_area_cache;
24391+ start_addr = mm->free_area_cache;
24392 } else {
24393- start_addr = TASK_UNMAPPED_BASE;
24394- mm->cached_hole_size = 0;
24395+ start_addr = mm->mmap_base;
24396+ mm->cached_hole_size = 0;
24397 }
24398
24399 full_search:
24400@@ -280,26 +287,27 @@ full_search:
24401
24402 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24403 /* At this point: (!vma || addr < vma->vm_end). */
24404- if (TASK_SIZE - len < addr) {
24405+ if (pax_task_size - len < addr) {
24406 /*
24407 * Start a new search - just in case we missed
24408 * some holes.
24409 */
24410- if (start_addr != TASK_UNMAPPED_BASE) {
24411- start_addr = TASK_UNMAPPED_BASE;
24412+ if (start_addr != mm->mmap_base) {
24413+ start_addr = mm->mmap_base;
24414 mm->cached_hole_size = 0;
24415 goto full_search;
24416 }
24417 return -ENOMEM;
24418 }
24419- if (!vma || addr + len <= vma->vm_start) {
24420- mm->free_area_cache = addr + len;
24421- return addr;
24422- }
24423+ if (check_heap_stack_gap(vma, addr, len))
24424+ break;
24425 if (addr + mm->cached_hole_size < vma->vm_start)
24426 mm->cached_hole_size = vma->vm_start - addr;
24427 addr = ALIGN(vma->vm_end, huge_page_size(h));
24428 }
24429+
24430+ mm->free_area_cache = addr + len;
24431+ return addr;
24432 }
24433
24434 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24435@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24436 struct mm_struct *mm = current->mm;
24437 struct vm_area_struct *vma;
24438 unsigned long base = mm->mmap_base;
24439- unsigned long addr = addr0;
24440+ unsigned long addr;
24441 unsigned long largest_hole = mm->cached_hole_size;
24442- unsigned long start_addr;
24443
24444 /* don't allow allocations above current base */
24445 if (mm->free_area_cache > base)
24446@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24447 largest_hole = 0;
24448 mm->free_area_cache = base;
24449 }
24450-try_again:
24451- start_addr = mm->free_area_cache;
24452
24453 /* make sure it can fit in the remaining address space */
24454 if (mm->free_area_cache < len)
24455 goto fail;
24456
24457 /* either no address requested or can't fit in requested address hole */
24458- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24459+ addr = mm->free_area_cache - len;
24460 do {
24461+ addr &= huge_page_mask(h);
24462 /*
24463 * Lookup failure means no vma is above this address,
24464 * i.e. return with success:
24465@@ -340,10 +346,10 @@ try_again:
24466 if (!vma)
24467 return addr;
24468
24469- if (addr + len <= vma->vm_start) {
24470+ if (check_heap_stack_gap(vma, addr, len)) {
24471 /* remember the address as a hint for next time */
24472- mm->cached_hole_size = largest_hole;
24473- return (mm->free_area_cache = addr);
24474+ mm->cached_hole_size = largest_hole;
24475+ return (mm->free_area_cache = addr);
24476 } else if (mm->free_area_cache == vma->vm_end) {
24477 /* pull free_area_cache down to the first hole */
24478 mm->free_area_cache = vma->vm_start;
24479@@ -352,29 +358,34 @@ try_again:
24480
24481 /* remember the largest hole we saw so far */
24482 if (addr + largest_hole < vma->vm_start)
24483- largest_hole = vma->vm_start - addr;
24484+ largest_hole = vma->vm_start - addr;
24485
24486 /* try just below the current vma->vm_start */
24487- addr = (vma->vm_start - len) & huge_page_mask(h);
24488- } while (len <= vma->vm_start);
24489+ addr = skip_heap_stack_gap(vma, len);
24490+ } while (!IS_ERR_VALUE(addr));
24491
24492 fail:
24493 /*
24494- * if hint left us with no space for the requested
24495- * mapping then try again:
24496- */
24497- if (start_addr != base) {
24498- mm->free_area_cache = base;
24499- largest_hole = 0;
24500- goto try_again;
24501- }
24502- /*
24503 * A failed mmap() very likely causes application failure,
24504 * so fall back to the bottom-up function here. This scenario
24505 * can happen with large stack limits and large mmap()
24506 * allocations.
24507 */
24508- mm->free_area_cache = TASK_UNMAPPED_BASE;
24509+
24510+#ifdef CONFIG_PAX_SEGMEXEC
24511+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24512+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24513+ else
24514+#endif
24515+
24516+ mm->mmap_base = TASK_UNMAPPED_BASE;
24517+
24518+#ifdef CONFIG_PAX_RANDMMAP
24519+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24520+ mm->mmap_base += mm->delta_mmap;
24521+#endif
24522+
24523+ mm->free_area_cache = mm->mmap_base;
24524 mm->cached_hole_size = ~0UL;
24525 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24526 len, pgoff, flags);
24527@@ -382,6 +393,7 @@ fail:
24528 /*
24529 * Restore the topdown base:
24530 */
24531+ mm->mmap_base = base;
24532 mm->free_area_cache = base;
24533 mm->cached_hole_size = ~0UL;
24534
24535@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24536 struct hstate *h = hstate_file(file);
24537 struct mm_struct *mm = current->mm;
24538 struct vm_area_struct *vma;
24539+ unsigned long pax_task_size = TASK_SIZE;
24540
24541 if (len & ~huge_page_mask(h))
24542 return -EINVAL;
24543- if (len > TASK_SIZE)
24544+
24545+#ifdef CONFIG_PAX_SEGMEXEC
24546+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24547+ pax_task_size = SEGMEXEC_TASK_SIZE;
24548+#endif
24549+
24550+ pax_task_size -= PAGE_SIZE;
24551+
24552+ if (len > pax_task_size)
24553 return -ENOMEM;
24554
24555 if (flags & MAP_FIXED) {
24556@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24557 if (addr) {
24558 addr = ALIGN(addr, huge_page_size(h));
24559 vma = find_vma(mm, addr);
24560- if (TASK_SIZE - len >= addr &&
24561- (!vma || addr + len <= vma->vm_start))
24562+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24563 return addr;
24564 }
24565 if (mm->get_unmapped_area == arch_get_unmapped_area)
24566diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24567index 4f0cec7..00976ce 100644
24568--- a/arch/x86/mm/init.c
24569+++ b/arch/x86/mm/init.c
24570@@ -16,6 +16,8 @@
24571 #include <asm/tlb.h>
24572 #include <asm/proto.h>
24573 #include <asm/dma.h> /* for MAX_DMA_PFN */
24574+#include <asm/desc.h>
24575+#include <asm/bios_ebda.h>
24576
24577 unsigned long __initdata pgt_buf_start;
24578 unsigned long __meminitdata pgt_buf_end;
24579@@ -32,7 +34,7 @@ int direct_gbpages
24580 static void __init find_early_table_space(unsigned long end, int use_pse,
24581 int use_gbpages)
24582 {
24583- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24584+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24585 phys_addr_t base;
24586
24587 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24588@@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24589 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24590 * mmio resources as well as potential bios/acpi data regions.
24591 */
24592+
24593+#ifdef CONFIG_GRKERNSEC_KMEM
24594+static unsigned int ebda_start __read_only;
24595+static unsigned int ebda_end __read_only;
24596+#endif
24597+
24598 int devmem_is_allowed(unsigned long pagenr)
24599 {
24600+#ifdef CONFIG_GRKERNSEC_KMEM
24601+ /* allow BDA */
24602+ if (!pagenr)
24603+ return 1;
24604+ /* allow EBDA */
24605+ if (pagenr >= ebda_start && pagenr < ebda_end)
24606+ return 1;
24607+#else
24608+ if (!pagenr)
24609+ return 1;
24610+#ifdef CONFIG_VM86
24611+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24612+ return 1;
24613+#endif
24614+#endif
24615+
24616+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24617+ return 1;
24618+#ifdef CONFIG_GRKERNSEC_KMEM
24619+ /* throw out everything else below 1MB */
24620 if (pagenr <= 256)
24621- return 1;
24622+ return 0;
24623+#endif
24624 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24625 return 0;
24626 if (!page_is_ram(pagenr))
24627@@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24628 #endif
24629 }
24630
24631+#ifdef CONFIG_GRKERNSEC_KMEM
24632+static inline void gr_init_ebda(void)
24633+{
24634+ unsigned int ebda_addr;
24635+ unsigned int ebda_size = 0;
24636+
24637+ ebda_addr = get_bios_ebda();
24638+ if (ebda_addr) {
24639+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24640+ ebda_size <<= 10;
24641+ }
24642+ if (ebda_addr && ebda_size) {
24643+ ebda_start = ebda_addr >> PAGE_SHIFT;
24644+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24645+ } else {
24646+ ebda_start = 0x9f000 >> PAGE_SHIFT;
24647+ ebda_end = 0xa0000 >> PAGE_SHIFT;
24648+ }
24649+}
24650+#else
24651+static inline void gr_init_ebda(void) { }
24652+#endif
24653+
24654 void free_initmem(void)
24655 {
24656+#ifdef CONFIG_PAX_KERNEXEC
24657+#ifdef CONFIG_X86_32
24658+ /* PaX: limit KERNEL_CS to actual size */
24659+ unsigned long addr, limit;
24660+ struct desc_struct d;
24661+ int cpu;
24662+#else
24663+ pgd_t *pgd;
24664+ pud_t *pud;
24665+ pmd_t *pmd;
24666+ unsigned long addr, end;
24667+#endif
24668+#endif
24669+
24670+ gr_init_ebda();
24671+
24672+#ifdef CONFIG_PAX_KERNEXEC
24673+#ifdef CONFIG_X86_32
24674+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24675+ limit = (limit - 1UL) >> PAGE_SHIFT;
24676+
24677+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24678+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24679+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24680+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24681+ }
24682+
24683+ /* PaX: make KERNEL_CS read-only */
24684+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24685+ if (!paravirt_enabled())
24686+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24687+/*
24688+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24689+ pgd = pgd_offset_k(addr);
24690+ pud = pud_offset(pgd, addr);
24691+ pmd = pmd_offset(pud, addr);
24692+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24693+ }
24694+*/
24695+#ifdef CONFIG_X86_PAE
24696+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24697+/*
24698+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24699+ pgd = pgd_offset_k(addr);
24700+ pud = pud_offset(pgd, addr);
24701+ pmd = pmd_offset(pud, addr);
24702+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24703+ }
24704+*/
24705+#endif
24706+
24707+#ifdef CONFIG_MODULES
24708+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24709+#endif
24710+
24711+#else
24712+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24713+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24714+ pgd = pgd_offset_k(addr);
24715+ pud = pud_offset(pgd, addr);
24716+ pmd = pmd_offset(pud, addr);
24717+ if (!pmd_present(*pmd))
24718+ continue;
24719+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24720+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24721+ else
24722+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24723+ }
24724+
24725+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24726+ end = addr + KERNEL_IMAGE_SIZE;
24727+ for (; addr < end; addr += PMD_SIZE) {
24728+ pgd = pgd_offset_k(addr);
24729+ pud = pud_offset(pgd, addr);
24730+ pmd = pmd_offset(pud, addr);
24731+ if (!pmd_present(*pmd))
24732+ continue;
24733+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24734+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24735+ }
24736+#endif
24737+
24738+ flush_tlb_all();
24739+#endif
24740+
24741 free_init_pages("unused kernel memory",
24742 (unsigned long)(&__init_begin),
24743 (unsigned long)(&__init_end));
24744diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24745index 575d86f..4987469 100644
24746--- a/arch/x86/mm/init_32.c
24747+++ b/arch/x86/mm/init_32.c
24748@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24749 }
24750
24751 /*
24752- * Creates a middle page table and puts a pointer to it in the
24753- * given global directory entry. This only returns the gd entry
24754- * in non-PAE compilation mode, since the middle layer is folded.
24755- */
24756-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24757-{
24758- pud_t *pud;
24759- pmd_t *pmd_table;
24760-
24761-#ifdef CONFIG_X86_PAE
24762- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24763- if (after_bootmem)
24764- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24765- else
24766- pmd_table = (pmd_t *)alloc_low_page();
24767- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24768- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24769- pud = pud_offset(pgd, 0);
24770- BUG_ON(pmd_table != pmd_offset(pud, 0));
24771-
24772- return pmd_table;
24773- }
24774-#endif
24775- pud = pud_offset(pgd, 0);
24776- pmd_table = pmd_offset(pud, 0);
24777-
24778- return pmd_table;
24779-}
24780-
24781-/*
24782 * Create a page table and place a pointer to it in a middle page
24783 * directory entry:
24784 */
24785@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24786 page_table = (pte_t *)alloc_low_page();
24787
24788 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24789+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24790+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24791+#else
24792 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24793+#endif
24794 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24795 }
24796
24797 return pte_offset_kernel(pmd, 0);
24798 }
24799
24800+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24801+{
24802+ pud_t *pud;
24803+ pmd_t *pmd_table;
24804+
24805+ pud = pud_offset(pgd, 0);
24806+ pmd_table = pmd_offset(pud, 0);
24807+
24808+ return pmd_table;
24809+}
24810+
24811 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24812 {
24813 int pgd_idx = pgd_index(vaddr);
24814@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24815 int pgd_idx, pmd_idx;
24816 unsigned long vaddr;
24817 pgd_t *pgd;
24818+ pud_t *pud;
24819 pmd_t *pmd;
24820 pte_t *pte = NULL;
24821
24822@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24823 pgd = pgd_base + pgd_idx;
24824
24825 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24826- pmd = one_md_table_init(pgd);
24827- pmd = pmd + pmd_index(vaddr);
24828+ pud = pud_offset(pgd, vaddr);
24829+ pmd = pmd_offset(pud, vaddr);
24830+
24831+#ifdef CONFIG_X86_PAE
24832+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24833+#endif
24834+
24835 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24836 pmd++, pmd_idx++) {
24837 pte = page_table_kmap_check(one_page_table_init(pmd),
24838@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24839 }
24840 }
24841
24842-static inline int is_kernel_text(unsigned long addr)
24843+static inline int is_kernel_text(unsigned long start, unsigned long end)
24844 {
24845- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24846- return 1;
24847- return 0;
24848+ if ((start > ktla_ktva((unsigned long)_etext) ||
24849+ end <= ktla_ktva((unsigned long)_stext)) &&
24850+ (start > ktla_ktva((unsigned long)_einittext) ||
24851+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24852+
24853+#ifdef CONFIG_ACPI_SLEEP
24854+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24855+#endif
24856+
24857+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24858+ return 0;
24859+ return 1;
24860 }
24861
24862 /*
24863@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24864 unsigned long last_map_addr = end;
24865 unsigned long start_pfn, end_pfn;
24866 pgd_t *pgd_base = swapper_pg_dir;
24867- int pgd_idx, pmd_idx, pte_ofs;
24868+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24869 unsigned long pfn;
24870 pgd_t *pgd;
24871+ pud_t *pud;
24872 pmd_t *pmd;
24873 pte_t *pte;
24874 unsigned pages_2m, pages_4k;
24875@@ -280,8 +281,13 @@ repeat:
24876 pfn = start_pfn;
24877 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24878 pgd = pgd_base + pgd_idx;
24879- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24880- pmd = one_md_table_init(pgd);
24881+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24882+ pud = pud_offset(pgd, 0);
24883+ pmd = pmd_offset(pud, 0);
24884+
24885+#ifdef CONFIG_X86_PAE
24886+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24887+#endif
24888
24889 if (pfn >= end_pfn)
24890 continue;
24891@@ -293,14 +299,13 @@ repeat:
24892 #endif
24893 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24894 pmd++, pmd_idx++) {
24895- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24896+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24897
24898 /*
24899 * Map with big pages if possible, otherwise
24900 * create normal page tables:
24901 */
24902 if (use_pse) {
24903- unsigned int addr2;
24904 pgprot_t prot = PAGE_KERNEL_LARGE;
24905 /*
24906 * first pass will use the same initial
24907@@ -310,11 +315,7 @@ repeat:
24908 __pgprot(PTE_IDENT_ATTR |
24909 _PAGE_PSE);
24910
24911- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24912- PAGE_OFFSET + PAGE_SIZE-1;
24913-
24914- if (is_kernel_text(addr) ||
24915- is_kernel_text(addr2))
24916+ if (is_kernel_text(address, address + PMD_SIZE))
24917 prot = PAGE_KERNEL_LARGE_EXEC;
24918
24919 pages_2m++;
24920@@ -331,7 +332,7 @@ repeat:
24921 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24922 pte += pte_ofs;
24923 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24924- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24925+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24926 pgprot_t prot = PAGE_KERNEL;
24927 /*
24928 * first pass will use the same initial
24929@@ -339,7 +340,7 @@ repeat:
24930 */
24931 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24932
24933- if (is_kernel_text(addr))
24934+ if (is_kernel_text(address, address + PAGE_SIZE))
24935 prot = PAGE_KERNEL_EXEC;
24936
24937 pages_4k++;
24938@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24939
24940 pud = pud_offset(pgd, va);
24941 pmd = pmd_offset(pud, va);
24942- if (!pmd_present(*pmd))
24943+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24944 break;
24945
24946 pte = pte_offset_kernel(pmd, va);
24947@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24948
24949 static void __init pagetable_init(void)
24950 {
24951- pgd_t *pgd_base = swapper_pg_dir;
24952-
24953- permanent_kmaps_init(pgd_base);
24954+ permanent_kmaps_init(swapper_pg_dir);
24955 }
24956
24957-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24958+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24959 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24960
24961 /* user-defined highmem size */
24962@@ -734,6 +733,12 @@ void __init mem_init(void)
24963
24964 pci_iommu_alloc();
24965
24966+#ifdef CONFIG_PAX_PER_CPU_PGD
24967+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24968+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24969+ KERNEL_PGD_PTRS);
24970+#endif
24971+
24972 #ifdef CONFIG_FLATMEM
24973 BUG_ON(!mem_map);
24974 #endif
24975@@ -760,7 +765,7 @@ void __init mem_init(void)
24976 reservedpages++;
24977
24978 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24979- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24980+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24981 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24982
24983 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24984@@ -801,10 +806,10 @@ void __init mem_init(void)
24985 ((unsigned long)&__init_end -
24986 (unsigned long)&__init_begin) >> 10,
24987
24988- (unsigned long)&_etext, (unsigned long)&_edata,
24989- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24990+ (unsigned long)&_sdata, (unsigned long)&_edata,
24991+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24992
24993- (unsigned long)&_text, (unsigned long)&_etext,
24994+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24995 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24996
24997 /*
24998@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24999 if (!kernel_set_to_readonly)
25000 return;
25001
25002+ start = ktla_ktva(start);
25003 pr_debug("Set kernel text: %lx - %lx for read write\n",
25004 start, start+size);
25005
25006@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
25007 if (!kernel_set_to_readonly)
25008 return;
25009
25010+ start = ktla_ktva(start);
25011 pr_debug("Set kernel text: %lx - %lx for read only\n",
25012 start, start+size);
25013
25014@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
25015 unsigned long start = PFN_ALIGN(_text);
25016 unsigned long size = PFN_ALIGN(_etext) - start;
25017
25018+ start = ktla_ktva(start);
25019 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25020 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25021 size >> 10);
25022diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25023index fc18be0..e539653 100644
25024--- a/arch/x86/mm/init_64.c
25025+++ b/arch/x86/mm/init_64.c
25026@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25027 * around without checking the pgd every time.
25028 */
25029
25030-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25031+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25032 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25033
25034 int force_personality32;
25035@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25036
25037 for (address = start; address <= end; address += PGDIR_SIZE) {
25038 const pgd_t *pgd_ref = pgd_offset_k(address);
25039+
25040+#ifdef CONFIG_PAX_PER_CPU_PGD
25041+ unsigned long cpu;
25042+#else
25043 struct page *page;
25044+#endif
25045
25046 if (pgd_none(*pgd_ref))
25047 continue;
25048
25049 spin_lock(&pgd_lock);
25050+
25051+#ifdef CONFIG_PAX_PER_CPU_PGD
25052+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25053+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25054+#else
25055 list_for_each_entry(page, &pgd_list, lru) {
25056 pgd_t *pgd;
25057 spinlock_t *pgt_lock;
25058@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25059 /* the pgt_lock only for Xen */
25060 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25061 spin_lock(pgt_lock);
25062+#endif
25063
25064 if (pgd_none(*pgd))
25065 set_pgd(pgd, *pgd_ref);
25066@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25067 BUG_ON(pgd_page_vaddr(*pgd)
25068 != pgd_page_vaddr(*pgd_ref));
25069
25070+#ifndef CONFIG_PAX_PER_CPU_PGD
25071 spin_unlock(pgt_lock);
25072+#endif
25073+
25074 }
25075 spin_unlock(&pgd_lock);
25076 }
25077@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25078 {
25079 if (pgd_none(*pgd)) {
25080 pud_t *pud = (pud_t *)spp_getpage();
25081- pgd_populate(&init_mm, pgd, pud);
25082+ pgd_populate_kernel(&init_mm, pgd, pud);
25083 if (pud != pud_offset(pgd, 0))
25084 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25085 pud, pud_offset(pgd, 0));
25086@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25087 {
25088 if (pud_none(*pud)) {
25089 pmd_t *pmd = (pmd_t *) spp_getpage();
25090- pud_populate(&init_mm, pud, pmd);
25091+ pud_populate_kernel(&init_mm, pud, pmd);
25092 if (pmd != pmd_offset(pud, 0))
25093 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25094 pmd, pmd_offset(pud, 0));
25095@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25096 pmd = fill_pmd(pud, vaddr);
25097 pte = fill_pte(pmd, vaddr);
25098
25099+ pax_open_kernel();
25100 set_pte(pte, new_pte);
25101+ pax_close_kernel();
25102
25103 /*
25104 * It's enough to flush this one mapping.
25105@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25106 pgd = pgd_offset_k((unsigned long)__va(phys));
25107 if (pgd_none(*pgd)) {
25108 pud = (pud_t *) spp_getpage();
25109- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25110- _PAGE_USER));
25111+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25112 }
25113 pud = pud_offset(pgd, (unsigned long)__va(phys));
25114 if (pud_none(*pud)) {
25115 pmd = (pmd_t *) spp_getpage();
25116- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25117- _PAGE_USER));
25118+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25119 }
25120 pmd = pmd_offset(pud, phys);
25121 BUG_ON(!pmd_none(*pmd));
25122@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25123 if (pfn >= pgt_buf_top)
25124 panic("alloc_low_page: ran out of memory");
25125
25126- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25127+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25128 clear_page(adr);
25129 *phys = pfn * PAGE_SIZE;
25130 return adr;
25131@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25132
25133 phys = __pa(virt);
25134 left = phys & (PAGE_SIZE - 1);
25135- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25136+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25137 adr = (void *)(((unsigned long)adr) | left);
25138
25139 return adr;
25140@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25141 unmap_low_page(pmd);
25142
25143 spin_lock(&init_mm.page_table_lock);
25144- pud_populate(&init_mm, pud, __va(pmd_phys));
25145+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25146 spin_unlock(&init_mm.page_table_lock);
25147 }
25148 __flush_tlb_all();
25149@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25150 unmap_low_page(pud);
25151
25152 spin_lock(&init_mm.page_table_lock);
25153- pgd_populate(&init_mm, pgd, __va(pud_phys));
25154+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25155 spin_unlock(&init_mm.page_table_lock);
25156 pgd_changed = true;
25157 }
25158@@ -683,6 +697,12 @@ void __init mem_init(void)
25159
25160 pci_iommu_alloc();
25161
25162+#ifdef CONFIG_PAX_PER_CPU_PGD
25163+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25164+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25165+ KERNEL_PGD_PTRS);
25166+#endif
25167+
25168 /* clear_bss() already clear the empty_zero_page */
25169
25170 reservedpages = 0;
25171@@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25172 static struct vm_area_struct gate_vma = {
25173 .vm_start = VSYSCALL_START,
25174 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25175- .vm_page_prot = PAGE_READONLY_EXEC,
25176- .vm_flags = VM_READ | VM_EXEC
25177+ .vm_page_prot = PAGE_READONLY,
25178+ .vm_flags = VM_READ
25179 };
25180
25181 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25182@@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25183
25184 const char *arch_vma_name(struct vm_area_struct *vma)
25185 {
25186- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25187+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25188 return "[vdso]";
25189 if (vma == &gate_vma)
25190 return "[vsyscall]";
25191diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25192index 7b179b4..6bd1777 100644
25193--- a/arch/x86/mm/iomap_32.c
25194+++ b/arch/x86/mm/iomap_32.c
25195@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25196 type = kmap_atomic_idx_push();
25197 idx = type + KM_TYPE_NR * smp_processor_id();
25198 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25199+
25200+ pax_open_kernel();
25201 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25202+ pax_close_kernel();
25203+
25204 arch_flush_lazy_mmu_mode();
25205
25206 return (void *)vaddr;
25207diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25208index be1ef57..55f0160 100644
25209--- a/arch/x86/mm/ioremap.c
25210+++ b/arch/x86/mm/ioremap.c
25211@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25212 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25213 int is_ram = page_is_ram(pfn);
25214
25215- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25216+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25217 return NULL;
25218 WARN_ON_ONCE(is_ram);
25219 }
25220@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25221
25222 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25223 if (page_is_ram(start >> PAGE_SHIFT))
25224+#ifdef CONFIG_HIGHMEM
25225+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25226+#endif
25227 return __va(phys);
25228
25229 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25230@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25231 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25232
25233 static __initdata int after_paging_init;
25234-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25235+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25236
25237 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25238 {
25239@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25240 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25241
25242 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25243- memset(bm_pte, 0, sizeof(bm_pte));
25244- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25245+ pmd_populate_user(&init_mm, pmd, bm_pte);
25246
25247 /*
25248 * The boot-ioremap range spans multiple pmds, for which
25249diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25250index d87dd6d..bf3fa66 100644
25251--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25252+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25253@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25254 * memory (e.g. tracked pages)? For now, we need this to avoid
25255 * invoking kmemcheck for PnP BIOS calls.
25256 */
25257- if (regs->flags & X86_VM_MASK)
25258+ if (v8086_mode(regs))
25259 return false;
25260- if (regs->cs != __KERNEL_CS)
25261+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25262 return false;
25263
25264 pte = kmemcheck_pte_lookup(address);
25265diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25266index 845df68..1d8d29f 100644
25267--- a/arch/x86/mm/mmap.c
25268+++ b/arch/x86/mm/mmap.c
25269@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25270 * Leave an at least ~128 MB hole with possible stack randomization.
25271 */
25272 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25273-#define MAX_GAP (TASK_SIZE/6*5)
25274+#define MAX_GAP (pax_task_size/6*5)
25275
25276 static int mmap_is_legacy(void)
25277 {
25278@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25279 return rnd << PAGE_SHIFT;
25280 }
25281
25282-static unsigned long mmap_base(void)
25283+static unsigned long mmap_base(struct mm_struct *mm)
25284 {
25285 unsigned long gap = rlimit(RLIMIT_STACK);
25286+ unsigned long pax_task_size = TASK_SIZE;
25287+
25288+#ifdef CONFIG_PAX_SEGMEXEC
25289+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25290+ pax_task_size = SEGMEXEC_TASK_SIZE;
25291+#endif
25292
25293 if (gap < MIN_GAP)
25294 gap = MIN_GAP;
25295 else if (gap > MAX_GAP)
25296 gap = MAX_GAP;
25297
25298- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25299+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25300 }
25301
25302 /*
25303 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25304 * does, but not when emulating X86_32
25305 */
25306-static unsigned long mmap_legacy_base(void)
25307+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25308 {
25309- if (mmap_is_ia32())
25310+ if (mmap_is_ia32()) {
25311+
25312+#ifdef CONFIG_PAX_SEGMEXEC
25313+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25314+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25315+ else
25316+#endif
25317+
25318 return TASK_UNMAPPED_BASE;
25319- else
25320+ } else
25321 return TASK_UNMAPPED_BASE + mmap_rnd();
25322 }
25323
25324@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25325 void arch_pick_mmap_layout(struct mm_struct *mm)
25326 {
25327 if (mmap_is_legacy()) {
25328- mm->mmap_base = mmap_legacy_base();
25329+ mm->mmap_base = mmap_legacy_base(mm);
25330+
25331+#ifdef CONFIG_PAX_RANDMMAP
25332+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25333+ mm->mmap_base += mm->delta_mmap;
25334+#endif
25335+
25336 mm->get_unmapped_area = arch_get_unmapped_area;
25337 mm->unmap_area = arch_unmap_area;
25338 } else {
25339- mm->mmap_base = mmap_base();
25340+ mm->mmap_base = mmap_base(mm);
25341+
25342+#ifdef CONFIG_PAX_RANDMMAP
25343+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25344+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25345+#endif
25346+
25347 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25348 mm->unmap_area = arch_unmap_area_topdown;
25349 }
25350diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25351index dc0b727..dc9d71a 100644
25352--- a/arch/x86/mm/mmio-mod.c
25353+++ b/arch/x86/mm/mmio-mod.c
25354@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25355 break;
25356 default:
25357 {
25358- unsigned char *ip = (unsigned char *)instptr;
25359+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25360 my_trace->opcode = MMIO_UNKNOWN_OP;
25361 my_trace->width = 0;
25362 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25363@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25364 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25365 void __iomem *addr)
25366 {
25367- static atomic_t next_id;
25368+ static atomic_unchecked_t next_id;
25369 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25370 /* These are page-unaligned. */
25371 struct mmiotrace_map map = {
25372@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25373 .private = trace
25374 },
25375 .phys = offset,
25376- .id = atomic_inc_return(&next_id)
25377+ .id = atomic_inc_return_unchecked(&next_id)
25378 };
25379 map.map_id = trace->id;
25380
25381diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25382index b008656..773eac2 100644
25383--- a/arch/x86/mm/pageattr-test.c
25384+++ b/arch/x86/mm/pageattr-test.c
25385@@ -36,7 +36,7 @@ enum {
25386
25387 static int pte_testbit(pte_t pte)
25388 {
25389- return pte_flags(pte) & _PAGE_UNUSED1;
25390+ return pte_flags(pte) & _PAGE_CPA_TEST;
25391 }
25392
25393 struct split_state {
25394diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25395index e1ebde3..b1e1db38 100644
25396--- a/arch/x86/mm/pageattr.c
25397+++ b/arch/x86/mm/pageattr.c
25398@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25399 */
25400 #ifdef CONFIG_PCI_BIOS
25401 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25402- pgprot_val(forbidden) |= _PAGE_NX;
25403+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25404 #endif
25405
25406 /*
25407@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25408 * Does not cover __inittext since that is gone later on. On
25409 * 64bit we do not enforce !NX on the low mapping
25410 */
25411- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25412- pgprot_val(forbidden) |= _PAGE_NX;
25413+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25414+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25415
25416+#ifdef CONFIG_DEBUG_RODATA
25417 /*
25418 * The .rodata section needs to be read-only. Using the pfn
25419 * catches all aliases.
25420@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25421 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25422 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25423 pgprot_val(forbidden) |= _PAGE_RW;
25424+#endif
25425
25426 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25427 /*
25428@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25429 }
25430 #endif
25431
25432+#ifdef CONFIG_PAX_KERNEXEC
25433+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25434+ pgprot_val(forbidden) |= _PAGE_RW;
25435+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25436+ }
25437+#endif
25438+
25439 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25440
25441 return prot;
25442@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25443 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25444 {
25445 /* change init_mm */
25446+ pax_open_kernel();
25447 set_pte_atomic(kpte, pte);
25448+
25449 #ifdef CONFIG_X86_32
25450 if (!SHARED_KERNEL_PMD) {
25451+
25452+#ifdef CONFIG_PAX_PER_CPU_PGD
25453+ unsigned long cpu;
25454+#else
25455 struct page *page;
25456+#endif
25457
25458+#ifdef CONFIG_PAX_PER_CPU_PGD
25459+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25460+ pgd_t *pgd = get_cpu_pgd(cpu);
25461+#else
25462 list_for_each_entry(page, &pgd_list, lru) {
25463- pgd_t *pgd;
25464+ pgd_t *pgd = (pgd_t *)page_address(page);
25465+#endif
25466+
25467 pud_t *pud;
25468 pmd_t *pmd;
25469
25470- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25471+ pgd += pgd_index(address);
25472 pud = pud_offset(pgd, address);
25473 pmd = pmd_offset(pud, address);
25474 set_pte_atomic((pte_t *)pmd, pte);
25475 }
25476 }
25477 #endif
25478+ pax_close_kernel();
25479 }
25480
25481 static int
25482diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25483index f6ff57b..481690f 100644
25484--- a/arch/x86/mm/pat.c
25485+++ b/arch/x86/mm/pat.c
25486@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25487
25488 if (!entry) {
25489 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25490- current->comm, current->pid, start, end);
25491+ current->comm, task_pid_nr(current), start, end);
25492 return -EINVAL;
25493 }
25494
25495@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25496 while (cursor < to) {
25497 if (!devmem_is_allowed(pfn)) {
25498 printk(KERN_INFO
25499- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25500- current->comm, from, to);
25501+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25502+ current->comm, from, to, cursor);
25503 return 0;
25504 }
25505 cursor += PAGE_SIZE;
25506@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25507 printk(KERN_INFO
25508 "%s:%d ioremap_change_attr failed %s "
25509 "for %Lx-%Lx\n",
25510- current->comm, current->pid,
25511+ current->comm, task_pid_nr(current),
25512 cattr_name(flags),
25513 base, (unsigned long long)(base + size));
25514 return -EINVAL;
25515@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25516 if (want_flags != flags) {
25517 printk(KERN_WARNING
25518 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25519- current->comm, current->pid,
25520+ current->comm, task_pid_nr(current),
25521 cattr_name(want_flags),
25522 (unsigned long long)paddr,
25523 (unsigned long long)(paddr + size),
25524@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25525 free_memtype(paddr, paddr + size);
25526 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25527 " for %Lx-%Lx, got %s\n",
25528- current->comm, current->pid,
25529+ current->comm, task_pid_nr(current),
25530 cattr_name(want_flags),
25531 (unsigned long long)paddr,
25532 (unsigned long long)(paddr + size),
25533diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25534index 9f0614d..92ae64a 100644
25535--- a/arch/x86/mm/pf_in.c
25536+++ b/arch/x86/mm/pf_in.c
25537@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25538 int i;
25539 enum reason_type rv = OTHERS;
25540
25541- p = (unsigned char *)ins_addr;
25542+ p = (unsigned char *)ktla_ktva(ins_addr);
25543 p += skip_prefix(p, &prf);
25544 p += get_opcode(p, &opcode);
25545
25546@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25547 struct prefix_bits prf;
25548 int i;
25549
25550- p = (unsigned char *)ins_addr;
25551+ p = (unsigned char *)ktla_ktva(ins_addr);
25552 p += skip_prefix(p, &prf);
25553 p += get_opcode(p, &opcode);
25554
25555@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25556 struct prefix_bits prf;
25557 int i;
25558
25559- p = (unsigned char *)ins_addr;
25560+ p = (unsigned char *)ktla_ktva(ins_addr);
25561 p += skip_prefix(p, &prf);
25562 p += get_opcode(p, &opcode);
25563
25564@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25565 struct prefix_bits prf;
25566 int i;
25567
25568- p = (unsigned char *)ins_addr;
25569+ p = (unsigned char *)ktla_ktva(ins_addr);
25570 p += skip_prefix(p, &prf);
25571 p += get_opcode(p, &opcode);
25572 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25573@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25574 struct prefix_bits prf;
25575 int i;
25576
25577- p = (unsigned char *)ins_addr;
25578+ p = (unsigned char *)ktla_ktva(ins_addr);
25579 p += skip_prefix(p, &prf);
25580 p += get_opcode(p, &opcode);
25581 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25582diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25583index 8573b83..4f3ed7e 100644
25584--- a/arch/x86/mm/pgtable.c
25585+++ b/arch/x86/mm/pgtable.c
25586@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25587 list_del(&page->lru);
25588 }
25589
25590-#define UNSHARED_PTRS_PER_PGD \
25591- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25592+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25593+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25594
25595+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25596+{
25597+ unsigned int count = USER_PGD_PTRS;
25598
25599+ while (count--)
25600+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25601+}
25602+#endif
25603+
25604+#ifdef CONFIG_PAX_PER_CPU_PGD
25605+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25606+{
25607+ unsigned int count = USER_PGD_PTRS;
25608+
25609+ while (count--) {
25610+ pgd_t pgd;
25611+
25612+#ifdef CONFIG_X86_64
25613+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25614+#else
25615+ pgd = *src++;
25616+#endif
25617+
25618+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25619+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25620+#endif
25621+
25622+ *dst++ = pgd;
25623+ }
25624+
25625+}
25626+#endif
25627+
25628+#ifdef CONFIG_X86_64
25629+#define pxd_t pud_t
25630+#define pyd_t pgd_t
25631+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25632+#define pxd_free(mm, pud) pud_free((mm), (pud))
25633+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25634+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25635+#define PYD_SIZE PGDIR_SIZE
25636+#else
25637+#define pxd_t pmd_t
25638+#define pyd_t pud_t
25639+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25640+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25641+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25642+#define pyd_offset(mm, address) pud_offset((mm), (address))
25643+#define PYD_SIZE PUD_SIZE
25644+#endif
25645+
25646+#ifdef CONFIG_PAX_PER_CPU_PGD
25647+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25648+static inline void pgd_dtor(pgd_t *pgd) {}
25649+#else
25650 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25651 {
25652 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25653@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25654 pgd_list_del(pgd);
25655 spin_unlock(&pgd_lock);
25656 }
25657+#endif
25658
25659 /*
25660 * List of all pgd's needed for non-PAE so it can invalidate entries
25661@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25662 * -- wli
25663 */
25664
25665-#ifdef CONFIG_X86_PAE
25666+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25667 /*
25668 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25669 * updating the top-level pagetable entries to guarantee the
25670@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25671 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25672 * and initialize the kernel pmds here.
25673 */
25674-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25675+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25676
25677 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25678 {
25679@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25680 */
25681 flush_tlb_mm(mm);
25682 }
25683+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25684+#define PREALLOCATED_PXDS USER_PGD_PTRS
25685 #else /* !CONFIG_X86_PAE */
25686
25687 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25688-#define PREALLOCATED_PMDS 0
25689+#define PREALLOCATED_PXDS 0
25690
25691 #endif /* CONFIG_X86_PAE */
25692
25693-static void free_pmds(pmd_t *pmds[])
25694+static void free_pxds(pxd_t *pxds[])
25695 {
25696 int i;
25697
25698- for(i = 0; i < PREALLOCATED_PMDS; i++)
25699- if (pmds[i])
25700- free_page((unsigned long)pmds[i]);
25701+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25702+ if (pxds[i])
25703+ free_page((unsigned long)pxds[i]);
25704 }
25705
25706-static int preallocate_pmds(pmd_t *pmds[])
25707+static int preallocate_pxds(pxd_t *pxds[])
25708 {
25709 int i;
25710 bool failed = false;
25711
25712- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25713- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25714- if (pmd == NULL)
25715+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25716+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25717+ if (pxd == NULL)
25718 failed = true;
25719- pmds[i] = pmd;
25720+ pxds[i] = pxd;
25721 }
25722
25723 if (failed) {
25724- free_pmds(pmds);
25725+ free_pxds(pxds);
25726 return -ENOMEM;
25727 }
25728
25729@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25730 * preallocate which never got a corresponding vma will need to be
25731 * freed manually.
25732 */
25733-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25734+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25735 {
25736 int i;
25737
25738- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25739+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25740 pgd_t pgd = pgdp[i];
25741
25742 if (pgd_val(pgd) != 0) {
25743- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25744+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25745
25746- pgdp[i] = native_make_pgd(0);
25747+ set_pgd(pgdp + i, native_make_pgd(0));
25748
25749- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25750- pmd_free(mm, pmd);
25751+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25752+ pxd_free(mm, pxd);
25753 }
25754 }
25755 }
25756
25757-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25758+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25759 {
25760- pud_t *pud;
25761+ pyd_t *pyd;
25762 unsigned long addr;
25763 int i;
25764
25765- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25766+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25767 return;
25768
25769- pud = pud_offset(pgd, 0);
25770+#ifdef CONFIG_X86_64
25771+ pyd = pyd_offset(mm, 0L);
25772+#else
25773+ pyd = pyd_offset(pgd, 0L);
25774+#endif
25775
25776- for (addr = i = 0; i < PREALLOCATED_PMDS;
25777- i++, pud++, addr += PUD_SIZE) {
25778- pmd_t *pmd = pmds[i];
25779+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25780+ i++, pyd++, addr += PYD_SIZE) {
25781+ pxd_t *pxd = pxds[i];
25782
25783 if (i >= KERNEL_PGD_BOUNDARY)
25784- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25785- sizeof(pmd_t) * PTRS_PER_PMD);
25786+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25787+ sizeof(pxd_t) * PTRS_PER_PMD);
25788
25789- pud_populate(mm, pud, pmd);
25790+ pyd_populate(mm, pyd, pxd);
25791 }
25792 }
25793
25794 pgd_t *pgd_alloc(struct mm_struct *mm)
25795 {
25796 pgd_t *pgd;
25797- pmd_t *pmds[PREALLOCATED_PMDS];
25798+ pxd_t *pxds[PREALLOCATED_PXDS];
25799
25800 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25801
25802@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25803
25804 mm->pgd = pgd;
25805
25806- if (preallocate_pmds(pmds) != 0)
25807+ if (preallocate_pxds(pxds) != 0)
25808 goto out_free_pgd;
25809
25810 if (paravirt_pgd_alloc(mm) != 0)
25811- goto out_free_pmds;
25812+ goto out_free_pxds;
25813
25814 /*
25815 * Make sure that pre-populating the pmds is atomic with
25816@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25817 spin_lock(&pgd_lock);
25818
25819 pgd_ctor(mm, pgd);
25820- pgd_prepopulate_pmd(mm, pgd, pmds);
25821+ pgd_prepopulate_pxd(mm, pgd, pxds);
25822
25823 spin_unlock(&pgd_lock);
25824
25825 return pgd;
25826
25827-out_free_pmds:
25828- free_pmds(pmds);
25829+out_free_pxds:
25830+ free_pxds(pxds);
25831 out_free_pgd:
25832 free_page((unsigned long)pgd);
25833 out:
25834@@ -295,7 +356,7 @@ out:
25835
25836 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25837 {
25838- pgd_mop_up_pmds(mm, pgd);
25839+ pgd_mop_up_pxds(mm, pgd);
25840 pgd_dtor(pgd);
25841 paravirt_pgd_free(mm, pgd);
25842 free_page((unsigned long)pgd);
25843diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25844index a69bcb8..19068ab 100644
25845--- a/arch/x86/mm/pgtable_32.c
25846+++ b/arch/x86/mm/pgtable_32.c
25847@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25848 return;
25849 }
25850 pte = pte_offset_kernel(pmd, vaddr);
25851+
25852+ pax_open_kernel();
25853 if (pte_val(pteval))
25854 set_pte_at(&init_mm, vaddr, pte, pteval);
25855 else
25856 pte_clear(&init_mm, vaddr, pte);
25857+ pax_close_kernel();
25858
25859 /*
25860 * It's enough to flush this one mapping.
25861diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25862index 410531d..0f16030 100644
25863--- a/arch/x86/mm/setup_nx.c
25864+++ b/arch/x86/mm/setup_nx.c
25865@@ -5,8 +5,10 @@
25866 #include <asm/pgtable.h>
25867 #include <asm/proto.h>
25868
25869+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25870 static int disable_nx __cpuinitdata;
25871
25872+#ifndef CONFIG_PAX_PAGEEXEC
25873 /*
25874 * noexec = on|off
25875 *
25876@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25877 return 0;
25878 }
25879 early_param("noexec", noexec_setup);
25880+#endif
25881+
25882+#endif
25883
25884 void __cpuinit x86_configure_nx(void)
25885 {
25886+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25887 if (cpu_has_nx && !disable_nx)
25888 __supported_pte_mask |= _PAGE_NX;
25889 else
25890+#endif
25891 __supported_pte_mask &= ~_PAGE_NX;
25892 }
25893
25894diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25895index d6c0418..06a0ad5 100644
25896--- a/arch/x86/mm/tlb.c
25897+++ b/arch/x86/mm/tlb.c
25898@@ -65,7 +65,11 @@ void leave_mm(int cpu)
25899 BUG();
25900 cpumask_clear_cpu(cpu,
25901 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25902+
25903+#ifndef CONFIG_PAX_PER_CPU_PGD
25904 load_cr3(swapper_pg_dir);
25905+#endif
25906+
25907 }
25908 EXPORT_SYMBOL_GPL(leave_mm);
25909
25910diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25911index 877b9a1..a8ecf42 100644
25912--- a/arch/x86/net/bpf_jit.S
25913+++ b/arch/x86/net/bpf_jit.S
25914@@ -9,6 +9,7 @@
25915 */
25916 #include <linux/linkage.h>
25917 #include <asm/dwarf2.h>
25918+#include <asm/alternative-asm.h>
25919
25920 /*
25921 * Calling convention :
25922@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25923 jle bpf_slow_path_word
25924 mov (SKBDATA,%rsi),%eax
25925 bswap %eax /* ntohl() */
25926+ pax_force_retaddr
25927 ret
25928
25929 sk_load_half:
25930@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25931 jle bpf_slow_path_half
25932 movzwl (SKBDATA,%rsi),%eax
25933 rol $8,%ax # ntohs()
25934+ pax_force_retaddr
25935 ret
25936
25937 sk_load_byte:
25938@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25939 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25940 jle bpf_slow_path_byte
25941 movzbl (SKBDATA,%rsi),%eax
25942+ pax_force_retaddr
25943 ret
25944
25945 /**
25946@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25947 movzbl (SKBDATA,%rsi),%ebx
25948 and $15,%bl
25949 shl $2,%bl
25950+ pax_force_retaddr
25951 ret
25952
25953 /* rsi contains offset and can be scratched */
25954@@ -109,6 +114,7 @@ bpf_slow_path_word:
25955 js bpf_error
25956 mov -12(%rbp),%eax
25957 bswap %eax
25958+ pax_force_retaddr
25959 ret
25960
25961 bpf_slow_path_half:
25962@@ -117,12 +123,14 @@ bpf_slow_path_half:
25963 mov -12(%rbp),%ax
25964 rol $8,%ax
25965 movzwl %ax,%eax
25966+ pax_force_retaddr
25967 ret
25968
25969 bpf_slow_path_byte:
25970 bpf_slow_path_common(1)
25971 js bpf_error
25972 movzbl -12(%rbp),%eax
25973+ pax_force_retaddr
25974 ret
25975
25976 bpf_slow_path_byte_msh:
25977@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25978 and $15,%al
25979 shl $2,%al
25980 xchg %eax,%ebx
25981+ pax_force_retaddr
25982 ret
25983
25984 #define sk_negative_common(SIZE) \
25985@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25986 sk_negative_common(4)
25987 mov (%rax), %eax
25988 bswap %eax
25989+ pax_force_retaddr
25990 ret
25991
25992 bpf_slow_path_half_neg:
25993@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25994 mov (%rax),%ax
25995 rol $8,%ax
25996 movzwl %ax,%eax
25997+ pax_force_retaddr
25998 ret
25999
26000 bpf_slow_path_byte_neg:
26001@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
26002 .globl sk_load_byte_negative_offset
26003 sk_negative_common(1)
26004 movzbl (%rax), %eax
26005+ pax_force_retaddr
26006 ret
26007
26008 bpf_slow_path_byte_msh_neg:
26009@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
26010 and $15,%al
26011 shl $2,%al
26012 xchg %eax,%ebx
26013+ pax_force_retaddr
26014 ret
26015
26016 bpf_error:
26017@@ -197,4 +210,5 @@ bpf_error:
26018 xor %eax,%eax
26019 mov -8(%rbp),%rbx
26020 leaveq
26021+ pax_force_retaddr
26022 ret
26023diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26024index 0597f95..a12c36e 100644
26025--- a/arch/x86/net/bpf_jit_comp.c
26026+++ b/arch/x86/net/bpf_jit_comp.c
26027@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
26028 set_fs(old_fs);
26029 }
26030
26031+struct bpf_jit_work {
26032+ struct work_struct work;
26033+ void *image;
26034+};
26035+
26036 #define CHOOSE_LOAD_FUNC(K, func) \
26037 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26038
26039@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26040 if (addrs == NULL)
26041 return;
26042
26043+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26044+ if (!fp->work)
26045+ goto out;
26046+
26047 /* Before first pass, make a rough estimation of addrs[]
26048 * each bpf instruction is translated to less than 64 bytes
26049 */
26050@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26051 break;
26052 default:
26053 /* hmm, too complex filter, give up with jit compiler */
26054- goto out;
26055+ goto error;
26056 }
26057 ilen = prog - temp;
26058 if (image) {
26059 if (unlikely(proglen + ilen > oldproglen)) {
26060 pr_err("bpb_jit_compile fatal error\n");
26061- kfree(addrs);
26062- module_free(NULL, image);
26063- return;
26064+ module_free_exec(NULL, image);
26065+ goto error;
26066 }
26067+ pax_open_kernel();
26068 memcpy(image + proglen, temp, ilen);
26069+ pax_close_kernel();
26070 }
26071 proglen += ilen;
26072 addrs[i] = proglen;
26073@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26074 break;
26075 }
26076 if (proglen == oldproglen) {
26077- image = module_alloc(max_t(unsigned int,
26078- proglen,
26079- sizeof(struct work_struct)));
26080+ image = module_alloc_exec(proglen);
26081 if (!image)
26082- goto out;
26083+ goto error;
26084 }
26085 oldproglen = proglen;
26086 }
26087@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26088 bpf_flush_icache(image, image + proglen);
26089
26090 fp->bpf_func = (void *)image;
26091- }
26092+ } else
26093+error:
26094+ kfree(fp->work);
26095+
26096 out:
26097 kfree(addrs);
26098 return;
26099@@ -648,18 +659,20 @@ out:
26100
26101 static void jit_free_defer(struct work_struct *arg)
26102 {
26103- module_free(NULL, arg);
26104+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26105+ kfree(arg);
26106 }
26107
26108 /* run from softirq, we must use a work_struct to call
26109- * module_free() from process context
26110+ * module_free_exec() from process context
26111 */
26112 void bpf_jit_free(struct sk_filter *fp)
26113 {
26114 if (fp->bpf_func != sk_run_filter) {
26115- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26116+ struct work_struct *work = &fp->work->work;
26117
26118 INIT_WORK(work, jit_free_defer);
26119+ fp->work->image = fp->bpf_func;
26120 schedule_work(work);
26121 }
26122 }
26123diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26124index d6aa6e8..266395a 100644
26125--- a/arch/x86/oprofile/backtrace.c
26126+++ b/arch/x86/oprofile/backtrace.c
26127@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26128 struct stack_frame_ia32 *fp;
26129 unsigned long bytes;
26130
26131- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26132+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26133 if (bytes != sizeof(bufhead))
26134 return NULL;
26135
26136- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26137+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26138
26139 oprofile_add_trace(bufhead[0].return_address);
26140
26141@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26142 struct stack_frame bufhead[2];
26143 unsigned long bytes;
26144
26145- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26146+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26147 if (bytes != sizeof(bufhead))
26148 return NULL;
26149
26150@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26151 {
26152 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26153
26154- if (!user_mode_vm(regs)) {
26155+ if (!user_mode(regs)) {
26156 unsigned long stack = kernel_stack_pointer(regs);
26157 if (depth)
26158 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26159diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26160index 140942f..8a5cc55 100644
26161--- a/arch/x86/pci/mrst.c
26162+++ b/arch/x86/pci/mrst.c
26163@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26164 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26165 pci_mmcfg_late_init();
26166 pcibios_enable_irq = mrst_pci_irq_enable;
26167- pci_root_ops = pci_mrst_ops;
26168+ pax_open_kernel();
26169+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26170+ pax_close_kernel();
26171 pci_soc_mode = 1;
26172 /* Continue with standard init */
26173 return 1;
26174diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26175index da8fe05..7ee6704 100644
26176--- a/arch/x86/pci/pcbios.c
26177+++ b/arch/x86/pci/pcbios.c
26178@@ -79,50 +79,93 @@ union bios32 {
26179 static struct {
26180 unsigned long address;
26181 unsigned short segment;
26182-} bios32_indirect = { 0, __KERNEL_CS };
26183+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26184
26185 /*
26186 * Returns the entry point for the given service, NULL on error
26187 */
26188
26189-static unsigned long bios32_service(unsigned long service)
26190+static unsigned long __devinit bios32_service(unsigned long service)
26191 {
26192 unsigned char return_code; /* %al */
26193 unsigned long address; /* %ebx */
26194 unsigned long length; /* %ecx */
26195 unsigned long entry; /* %edx */
26196 unsigned long flags;
26197+ struct desc_struct d, *gdt;
26198
26199 local_irq_save(flags);
26200- __asm__("lcall *(%%edi); cld"
26201+
26202+ gdt = get_cpu_gdt_table(smp_processor_id());
26203+
26204+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26205+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26206+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26207+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26208+
26209+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26210 : "=a" (return_code),
26211 "=b" (address),
26212 "=c" (length),
26213 "=d" (entry)
26214 : "0" (service),
26215 "1" (0),
26216- "D" (&bios32_indirect));
26217+ "D" (&bios32_indirect),
26218+ "r"(__PCIBIOS_DS)
26219+ : "memory");
26220+
26221+ pax_open_kernel();
26222+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26223+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26224+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26225+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26226+ pax_close_kernel();
26227+
26228 local_irq_restore(flags);
26229
26230 switch (return_code) {
26231- case 0:
26232- return address + entry;
26233- case 0x80: /* Not present */
26234- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26235- return 0;
26236- default: /* Shouldn't happen */
26237- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26238- service, return_code);
26239+ case 0: {
26240+ int cpu;
26241+ unsigned char flags;
26242+
26243+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26244+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26245+ printk(KERN_WARNING "bios32_service: not valid\n");
26246 return 0;
26247+ }
26248+ address = address + PAGE_OFFSET;
26249+ length += 16UL; /* some BIOSs underreport this... */
26250+ flags = 4;
26251+ if (length >= 64*1024*1024) {
26252+ length >>= PAGE_SHIFT;
26253+ flags |= 8;
26254+ }
26255+
26256+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26257+ gdt = get_cpu_gdt_table(cpu);
26258+ pack_descriptor(&d, address, length, 0x9b, flags);
26259+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26260+ pack_descriptor(&d, address, length, 0x93, flags);
26261+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26262+ }
26263+ return entry;
26264+ }
26265+ case 0x80: /* Not present */
26266+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26267+ return 0;
26268+ default: /* Shouldn't happen */
26269+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26270+ service, return_code);
26271+ return 0;
26272 }
26273 }
26274
26275 static struct {
26276 unsigned long address;
26277 unsigned short segment;
26278-} pci_indirect = { 0, __KERNEL_CS };
26279+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26280
26281-static int pci_bios_present;
26282+static int pci_bios_present __read_only;
26283
26284 static int __devinit check_pcibios(void)
26285 {
26286@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26287 unsigned long flags, pcibios_entry;
26288
26289 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26290- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26291+ pci_indirect.address = pcibios_entry;
26292
26293 local_irq_save(flags);
26294- __asm__(
26295- "lcall *(%%edi); cld\n\t"
26296+ __asm__("movw %w6, %%ds\n\t"
26297+ "lcall *%%ss:(%%edi); cld\n\t"
26298+ "push %%ss\n\t"
26299+ "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26304 "=b" (ebx),
26305 "=c" (ecx)
26306 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26307- "D" (&pci_indirect)
26308+ "D" (&pci_indirect),
26309+ "r" (__PCIBIOS_DS)
26310 : "memory");
26311 local_irq_restore(flags);
26312
26313@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26314
26315 switch (len) {
26316 case 1:
26317- __asm__("lcall *(%%esi); cld\n\t"
26318+ __asm__("movw %w6, %%ds\n\t"
26319+ "lcall *%%ss:(%%esi); cld\n\t"
26320+ "push %%ss\n\t"
26321+ "pop %%ds\n\t"
26322 "jc 1f\n\t"
26323 "xor %%ah, %%ah\n"
26324 "1:"
26325@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26326 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26327 "b" (bx),
26328 "D" ((long)reg),
26329- "S" (&pci_indirect));
26330+ "S" (&pci_indirect),
26331+ "r" (__PCIBIOS_DS));
26332 /*
26333 * Zero-extend the result beyond 8 bits, do not trust the
26334 * BIOS having done it:
26335@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26336 *value &= 0xff;
26337 break;
26338 case 2:
26339- __asm__("lcall *(%%esi); cld\n\t"
26340+ __asm__("movw %w6, %%ds\n\t"
26341+ "lcall *%%ss:(%%esi); cld\n\t"
26342+ "push %%ss\n\t"
26343+ "pop %%ds\n\t"
26344 "jc 1f\n\t"
26345 "xor %%ah, %%ah\n"
26346 "1:"
26347@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26348 : "1" (PCIBIOS_READ_CONFIG_WORD),
26349 "b" (bx),
26350 "D" ((long)reg),
26351- "S" (&pci_indirect));
26352+ "S" (&pci_indirect),
26353+ "r" (__PCIBIOS_DS));
26354 /*
26355 * Zero-extend the result beyond 16 bits, do not trust the
26356 * BIOS having done it:
26357@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26358 *value &= 0xffff;
26359 break;
26360 case 4:
26361- __asm__("lcall *(%%esi); cld\n\t"
26362+ __asm__("movw %w6, %%ds\n\t"
26363+ "lcall *%%ss:(%%esi); cld\n\t"
26364+ "push %%ss\n\t"
26365+ "pop %%ds\n\t"
26366 "jc 1f\n\t"
26367 "xor %%ah, %%ah\n"
26368 "1:"
26369@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26370 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26371 "b" (bx),
26372 "D" ((long)reg),
26373- "S" (&pci_indirect));
26374+ "S" (&pci_indirect),
26375+ "r" (__PCIBIOS_DS));
26376 break;
26377 }
26378
26379@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26380
26381 switch (len) {
26382 case 1:
26383- __asm__("lcall *(%%esi); cld\n\t"
26384+ __asm__("movw %w6, %%ds\n\t"
26385+ "lcall *%%ss:(%%esi); cld\n\t"
26386+ "push %%ss\n\t"
26387+ "pop %%ds\n\t"
26388 "jc 1f\n\t"
26389 "xor %%ah, %%ah\n"
26390 "1:"
26391@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26392 "c" (value),
26393 "b" (bx),
26394 "D" ((long)reg),
26395- "S" (&pci_indirect));
26396+ "S" (&pci_indirect),
26397+ "r" (__PCIBIOS_DS));
26398 break;
26399 case 2:
26400- __asm__("lcall *(%%esi); cld\n\t"
26401+ __asm__("movw %w6, %%ds\n\t"
26402+ "lcall *%%ss:(%%esi); cld\n\t"
26403+ "push %%ss\n\t"
26404+ "pop %%ds\n\t"
26405 "jc 1f\n\t"
26406 "xor %%ah, %%ah\n"
26407 "1:"
26408@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26409 "c" (value),
26410 "b" (bx),
26411 "D" ((long)reg),
26412- "S" (&pci_indirect));
26413+ "S" (&pci_indirect),
26414+ "r" (__PCIBIOS_DS));
26415 break;
26416 case 4:
26417- __asm__("lcall *(%%esi); cld\n\t"
26418+ __asm__("movw %w6, %%ds\n\t"
26419+ "lcall *%%ss:(%%esi); cld\n\t"
26420+ "push %%ss\n\t"
26421+ "pop %%ds\n\t"
26422 "jc 1f\n\t"
26423 "xor %%ah, %%ah\n"
26424 "1:"
26425@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26426 "c" (value),
26427 "b" (bx),
26428 "D" ((long)reg),
26429- "S" (&pci_indirect));
26430+ "S" (&pci_indirect),
26431+ "r" (__PCIBIOS_DS));
26432 break;
26433 }
26434
26435@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26436
26437 DBG("PCI: Fetching IRQ routing table... ");
26438 __asm__("push %%es\n\t"
26439+ "movw %w8, %%ds\n\t"
26440 "push %%ds\n\t"
26441 "pop %%es\n\t"
26442- "lcall *(%%esi); cld\n\t"
26443+ "lcall *%%ss:(%%esi); cld\n\t"
26444 "pop %%es\n\t"
26445+ "push %%ss\n\t"
26446+ "pop %%ds\n"
26447 "jc 1f\n\t"
26448 "xor %%ah, %%ah\n"
26449 "1:"
26450@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26451 "1" (0),
26452 "D" ((long) &opt),
26453 "S" (&pci_indirect),
26454- "m" (opt)
26455+ "m" (opt),
26456+ "r" (__PCIBIOS_DS)
26457 : "memory");
26458 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26459 if (ret & 0xff00)
26460@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26461 {
26462 int ret;
26463
26464- __asm__("lcall *(%%esi); cld\n\t"
26465+ __asm__("movw %w5, %%ds\n\t"
26466+ "lcall *%%ss:(%%esi); cld\n\t"
26467+ "push %%ss\n\t"
26468+ "pop %%ds\n"
26469 "jc 1f\n\t"
26470 "xor %%ah, %%ah\n"
26471 "1:"
26472@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26473 : "0" (PCIBIOS_SET_PCI_HW_INT),
26474 "b" ((dev->bus->number << 8) | dev->devfn),
26475 "c" ((irq << 8) | (pin + 10)),
26476- "S" (&pci_indirect));
26477+ "S" (&pci_indirect),
26478+ "r" (__PCIBIOS_DS));
26479 return !(ret & 0xff00);
26480 }
26481 EXPORT_SYMBOL(pcibios_set_irq_routing);
26482diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26483index 40e4469..1ab536e 100644
26484--- a/arch/x86/platform/efi/efi_32.c
26485+++ b/arch/x86/platform/efi/efi_32.c
26486@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26487 {
26488 struct desc_ptr gdt_descr;
26489
26490+#ifdef CONFIG_PAX_KERNEXEC
26491+ struct desc_struct d;
26492+#endif
26493+
26494 local_irq_save(efi_rt_eflags);
26495
26496 load_cr3(initial_page_table);
26497 __flush_tlb_all();
26498
26499+#ifdef CONFIG_PAX_KERNEXEC
26500+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26501+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26502+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26503+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26504+#endif
26505+
26506 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26507 gdt_descr.size = GDT_SIZE - 1;
26508 load_gdt(&gdt_descr);
26509@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26510 {
26511 struct desc_ptr gdt_descr;
26512
26513+#ifdef CONFIG_PAX_KERNEXEC
26514+ struct desc_struct d;
26515+
26516+ memset(&d, 0, sizeof d);
26517+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26518+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26519+#endif
26520+
26521 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26522 gdt_descr.size = GDT_SIZE - 1;
26523 load_gdt(&gdt_descr);
26524diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26525index fbe66e6..c5c0dd2 100644
26526--- a/arch/x86/platform/efi/efi_stub_32.S
26527+++ b/arch/x86/platform/efi/efi_stub_32.S
26528@@ -6,7 +6,9 @@
26529 */
26530
26531 #include <linux/linkage.h>
26532+#include <linux/init.h>
26533 #include <asm/page_types.h>
26534+#include <asm/segment.h>
26535
26536 /*
26537 * efi_call_phys(void *, ...) is a function with variable parameters.
26538@@ -20,7 +22,7 @@
26539 * service functions will comply with gcc calling convention, too.
26540 */
26541
26542-.text
26543+__INIT
26544 ENTRY(efi_call_phys)
26545 /*
26546 * 0. The function can only be called in Linux kernel. So CS has been
26547@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26548 * The mapping of lower virtual memory has been created in prelog and
26549 * epilog.
26550 */
26551- movl $1f, %edx
26552- subl $__PAGE_OFFSET, %edx
26553- jmp *%edx
26554+ movl $(__KERNEXEC_EFI_DS), %edx
26555+ mov %edx, %ds
26556+ mov %edx, %es
26557+ mov %edx, %ss
26558+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26559 1:
26560
26561 /*
26562@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26563 * parameter 2, ..., param n. To make things easy, we save the return
26564 * address of efi_call_phys in a global variable.
26565 */
26566- popl %edx
26567- movl %edx, saved_return_addr
26568- /* get the function pointer into ECX*/
26569- popl %ecx
26570- movl %ecx, efi_rt_function_ptr
26571- movl $2f, %edx
26572- subl $__PAGE_OFFSET, %edx
26573- pushl %edx
26574+ popl (saved_return_addr)
26575+ popl (efi_rt_function_ptr)
26576
26577 /*
26578 * 3. Clear PG bit in %CR0.
26579@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26580 /*
26581 * 5. Call the physical function.
26582 */
26583- jmp *%ecx
26584+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26585
26586-2:
26587 /*
26588 * 6. After EFI runtime service returns, control will return to
26589 * following instruction. We'd better readjust stack pointer first.
26590@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26591 movl %cr0, %edx
26592 orl $0x80000000, %edx
26593 movl %edx, %cr0
26594- jmp 1f
26595-1:
26596+
26597 /*
26598 * 8. Now restore the virtual mode from flat mode by
26599 * adding EIP with PAGE_OFFSET.
26600 */
26601- movl $1f, %edx
26602- jmp *%edx
26603+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26604 1:
26605+ movl $(__KERNEL_DS), %edx
26606+ mov %edx, %ds
26607+ mov %edx, %es
26608+ mov %edx, %ss
26609
26610 /*
26611 * 9. Balance the stack. And because EAX contain the return value,
26612 * we'd better not clobber it.
26613 */
26614- leal efi_rt_function_ptr, %edx
26615- movl (%edx), %ecx
26616- pushl %ecx
26617+ pushl (efi_rt_function_ptr)
26618
26619 /*
26620- * 10. Push the saved return address onto the stack and return.
26621+ * 10. Return to the saved return address.
26622 */
26623- leal saved_return_addr, %edx
26624- movl (%edx), %ecx
26625- pushl %ecx
26626- ret
26627+ jmpl *(saved_return_addr)
26628 ENDPROC(efi_call_phys)
26629 .previous
26630
26631-.data
26632+__INITDATA
26633 saved_return_addr:
26634 .long 0
26635 efi_rt_function_ptr:
26636diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26637index 4c07cca..2c8427d 100644
26638--- a/arch/x86/platform/efi/efi_stub_64.S
26639+++ b/arch/x86/platform/efi/efi_stub_64.S
26640@@ -7,6 +7,7 @@
26641 */
26642
26643 #include <linux/linkage.h>
26644+#include <asm/alternative-asm.h>
26645
26646 #define SAVE_XMM \
26647 mov %rsp, %rax; \
26648@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26649 call *%rdi
26650 addq $32, %rsp
26651 RESTORE_XMM
26652+ pax_force_retaddr 0, 1
26653 ret
26654 ENDPROC(efi_call0)
26655
26656@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26657 call *%rdi
26658 addq $32, %rsp
26659 RESTORE_XMM
26660+ pax_force_retaddr 0, 1
26661 ret
26662 ENDPROC(efi_call1)
26663
26664@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26665 call *%rdi
26666 addq $32, %rsp
26667 RESTORE_XMM
26668+ pax_force_retaddr 0, 1
26669 ret
26670 ENDPROC(efi_call2)
26671
26672@@ -71,6 +75,7 @@ ENTRY(efi_call3)
26673 call *%rdi
26674 addq $32, %rsp
26675 RESTORE_XMM
26676+ pax_force_retaddr 0, 1
26677 ret
26678 ENDPROC(efi_call3)
26679
26680@@ -83,6 +88,7 @@ ENTRY(efi_call4)
26681 call *%rdi
26682 addq $32, %rsp
26683 RESTORE_XMM
26684+ pax_force_retaddr 0, 1
26685 ret
26686 ENDPROC(efi_call4)
26687
26688@@ -96,6 +102,7 @@ ENTRY(efi_call5)
26689 call *%rdi
26690 addq $48, %rsp
26691 RESTORE_XMM
26692+ pax_force_retaddr 0, 1
26693 ret
26694 ENDPROC(efi_call5)
26695
26696@@ -112,5 +119,6 @@ ENTRY(efi_call6)
26697 call *%rdi
26698 addq $48, %rsp
26699 RESTORE_XMM
26700+ pax_force_retaddr 0, 1
26701 ret
26702 ENDPROC(efi_call6)
26703diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26704index e31bcd8..f12dc46 100644
26705--- a/arch/x86/platform/mrst/mrst.c
26706+++ b/arch/x86/platform/mrst/mrst.c
26707@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26708 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26709 int sfi_mrtc_num;
26710
26711-static void mrst_power_off(void)
26712+static __noreturn void mrst_power_off(void)
26713 {
26714+ BUG();
26715 }
26716
26717-static void mrst_reboot(void)
26718+static __noreturn void mrst_reboot(void)
26719 {
26720 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26721+ BUG();
26722 }
26723
26724 /* parse all the mtimer info to a static mtimer array */
26725diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26726index 218cdb1..fd55c08 100644
26727--- a/arch/x86/power/cpu.c
26728+++ b/arch/x86/power/cpu.c
26729@@ -132,7 +132,7 @@ static void do_fpu_end(void)
26730 static void fix_processor_context(void)
26731 {
26732 int cpu = smp_processor_id();
26733- struct tss_struct *t = &per_cpu(init_tss, cpu);
26734+ struct tss_struct *t = init_tss + cpu;
26735
26736 set_tss_desc(cpu, t); /*
26737 * This just modifies memory; should not be
26738@@ -142,7 +142,9 @@ static void fix_processor_context(void)
26739 */
26740
26741 #ifdef CONFIG_X86_64
26742+ pax_open_kernel();
26743 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26744+ pax_close_kernel();
26745
26746 syscall_init(); /* This sets MSR_*STAR and related */
26747 #endif
26748diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26749index b685296..e00eb65 100644
26750--- a/arch/x86/tools/relocs.c
26751+++ b/arch/x86/tools/relocs.c
26752@@ -12,10 +12,13 @@
26753 #include <regex.h>
26754 #include <tools/le_byteshift.h>
26755
26756+#include "../../../include/generated/autoconf.h"
26757+
26758 static void die(char *fmt, ...);
26759
26760 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26761 static Elf32_Ehdr ehdr;
26762+static Elf32_Phdr *phdr;
26763 static unsigned long reloc_count, reloc_idx;
26764 static unsigned long *relocs;
26765 static unsigned long reloc16_count, reloc16_idx;
26766@@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26767 }
26768 }
26769
26770+static void read_phdrs(FILE *fp)
26771+{
26772+ unsigned int i;
26773+
26774+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26775+ if (!phdr) {
26776+ die("Unable to allocate %d program headers\n",
26777+ ehdr.e_phnum);
26778+ }
26779+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26780+ die("Seek to %d failed: %s\n",
26781+ ehdr.e_phoff, strerror(errno));
26782+ }
26783+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26784+ die("Cannot read ELF program headers: %s\n",
26785+ strerror(errno));
26786+ }
26787+ for(i = 0; i < ehdr.e_phnum; i++) {
26788+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26789+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26790+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26791+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26792+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26793+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26794+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26795+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26796+ }
26797+
26798+}
26799+
26800 static void read_shdrs(FILE *fp)
26801 {
26802- int i;
26803+ unsigned int i;
26804 Elf32_Shdr shdr;
26805
26806 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26807@@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26808
26809 static void read_strtabs(FILE *fp)
26810 {
26811- int i;
26812+ unsigned int i;
26813 for (i = 0; i < ehdr.e_shnum; i++) {
26814 struct section *sec = &secs[i];
26815 if (sec->shdr.sh_type != SHT_STRTAB) {
26816@@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26817
26818 static void read_symtabs(FILE *fp)
26819 {
26820- int i,j;
26821+ unsigned int i,j;
26822 for (i = 0; i < ehdr.e_shnum; i++) {
26823 struct section *sec = &secs[i];
26824 if (sec->shdr.sh_type != SHT_SYMTAB) {
26825@@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26826
26827 static void read_relocs(FILE *fp)
26828 {
26829- int i,j;
26830+ unsigned int i,j;
26831+ uint32_t base;
26832+
26833 for (i = 0; i < ehdr.e_shnum; i++) {
26834 struct section *sec = &secs[i];
26835 if (sec->shdr.sh_type != SHT_REL) {
26836@@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26837 die("Cannot read symbol table: %s\n",
26838 strerror(errno));
26839 }
26840+ base = 0;
26841+
26842+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26843+ for (j = 0; j < ehdr.e_phnum; j++) {
26844+ if (phdr[j].p_type != PT_LOAD )
26845+ continue;
26846+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26847+ continue;
26848+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26849+ break;
26850+ }
26851+#endif
26852+
26853 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26854 Elf32_Rel *rel = &sec->reltab[j];
26855- rel->r_offset = elf32_to_cpu(rel->r_offset);
26856+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26857 rel->r_info = elf32_to_cpu(rel->r_info);
26858 }
26859 }
26860@@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26861
26862 static void print_absolute_symbols(void)
26863 {
26864- int i;
26865+ unsigned int i;
26866 printf("Absolute symbols\n");
26867 printf(" Num: Value Size Type Bind Visibility Name\n");
26868 for (i = 0; i < ehdr.e_shnum; i++) {
26869 struct section *sec = &secs[i];
26870 char *sym_strtab;
26871- int j;
26872+ unsigned int j;
26873
26874 if (sec->shdr.sh_type != SHT_SYMTAB) {
26875 continue;
26876@@ -482,14 +530,14 @@ static void print_absolute_symbols(void)
26877
26878 static void print_absolute_relocs(void)
26879 {
26880- int i, printed = 0;
26881+ unsigned int i, printed = 0;
26882
26883 for (i = 0; i < ehdr.e_shnum; i++) {
26884 struct section *sec = &secs[i];
26885 struct section *sec_applies, *sec_symtab;
26886 char *sym_strtab;
26887 Elf32_Sym *sh_symtab;
26888- int j;
26889+ unsigned int j;
26890 if (sec->shdr.sh_type != SHT_REL) {
26891 continue;
26892 }
26893@@ -551,13 +599,13 @@ static void print_absolute_relocs(void)
26894 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26895 int use_real_mode)
26896 {
26897- int i;
26898+ unsigned int i;
26899 /* Walk through the relocations */
26900 for (i = 0; i < ehdr.e_shnum; i++) {
26901 char *sym_strtab;
26902 Elf32_Sym *sh_symtab;
26903 struct section *sec_applies, *sec_symtab;
26904- int j;
26905+ unsigned int j;
26906 struct section *sec = &secs[i];
26907
26908 if (sec->shdr.sh_type != SHT_REL) {
26909@@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26910 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26911 r_type = ELF32_R_TYPE(rel->r_info);
26912
26913+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26914+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26915+ continue;
26916+
26917+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26918+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26919+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26920+ continue;
26921+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26922+ continue;
26923+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26924+ continue;
26925+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26926+ continue;
26927+#endif
26928+
26929 shn_abs = sym->st_shndx == SHN_ABS;
26930
26931 switch (r_type) {
26932@@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26933
26934 static void emit_relocs(int as_text, int use_real_mode)
26935 {
26936- int i;
26937+ unsigned int i;
26938 /* Count how many relocations I have and allocate space for them. */
26939 reloc_count = 0;
26940 walk_relocs(count_reloc, use_real_mode);
26941@@ -801,6 +865,7 @@ int main(int argc, char **argv)
26942 fname, strerror(errno));
26943 }
26944 read_ehdr(fp);
26945+ read_phdrs(fp);
26946 read_shdrs(fp);
26947 read_strtabs(fp);
26948 read_symtabs(fp);
26949diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26950index fd14be1..e3c79c0 100644
26951--- a/arch/x86/vdso/Makefile
26952+++ b/arch/x86/vdso/Makefile
26953@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26954 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26955 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26956
26957-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26958+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26959 GCOV_PROFILE := n
26960
26961 #
26962diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26963index 66e6d93..587f435 100644
26964--- a/arch/x86/vdso/vdso32-setup.c
26965+++ b/arch/x86/vdso/vdso32-setup.c
26966@@ -25,6 +25,7 @@
26967 #include <asm/tlbflush.h>
26968 #include <asm/vdso.h>
26969 #include <asm/proto.h>
26970+#include <asm/mman.h>
26971
26972 enum {
26973 VDSO_DISABLED = 0,
26974@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26975 void enable_sep_cpu(void)
26976 {
26977 int cpu = get_cpu();
26978- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26979+ struct tss_struct *tss = init_tss + cpu;
26980
26981 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26982 put_cpu();
26983@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26984 gate_vma.vm_start = FIXADDR_USER_START;
26985 gate_vma.vm_end = FIXADDR_USER_END;
26986 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26987- gate_vma.vm_page_prot = __P101;
26988+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26989
26990 return 0;
26991 }
26992@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26993 if (compat)
26994 addr = VDSO_HIGH_BASE;
26995 else {
26996- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26997+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26998 if (IS_ERR_VALUE(addr)) {
26999 ret = addr;
27000 goto up_fail;
27001 }
27002 }
27003
27004- current->mm->context.vdso = (void *)addr;
27005+ current->mm->context.vdso = addr;
27006
27007 if (compat_uses_vma || !compat) {
27008 /*
27009@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27010 }
27011
27012 current_thread_info()->sysenter_return =
27013- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27014+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27015
27016 up_fail:
27017 if (ret)
27018- current->mm->context.vdso = NULL;
27019+ current->mm->context.vdso = 0;
27020
27021 up_write(&mm->mmap_sem);
27022
27023@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27024
27025 const char *arch_vma_name(struct vm_area_struct *vma)
27026 {
27027- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27028+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27029 return "[vdso]";
27030+
27031+#ifdef CONFIG_PAX_SEGMEXEC
27032+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27033+ return "[vdso]";
27034+#endif
27035+
27036 return NULL;
27037 }
27038
27039@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27040 * Check to see if the corresponding task was created in compat vdso
27041 * mode.
27042 */
27043- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27044+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27045 return &gate_vma;
27046 return NULL;
27047 }
27048diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27049index 00aaf04..4a26505 100644
27050--- a/arch/x86/vdso/vma.c
27051+++ b/arch/x86/vdso/vma.c
27052@@ -16,8 +16,6 @@
27053 #include <asm/vdso.h>
27054 #include <asm/page.h>
27055
27056-unsigned int __read_mostly vdso_enabled = 1;
27057-
27058 extern char vdso_start[], vdso_end[];
27059 extern unsigned short vdso_sync_cpuid;
27060
27061@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27062 * unaligned here as a result of stack start randomization.
27063 */
27064 addr = PAGE_ALIGN(addr);
27065- addr = align_addr(addr, NULL, ALIGN_VDSO);
27066
27067 return addr;
27068 }
27069@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27070 unsigned size)
27071 {
27072 struct mm_struct *mm = current->mm;
27073- unsigned long addr;
27074+ unsigned long addr = 0;
27075 int ret;
27076
27077- if (!vdso_enabled)
27078- return 0;
27079-
27080 down_write(&mm->mmap_sem);
27081+
27082+#ifdef CONFIG_PAX_RANDMMAP
27083+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27084+#endif
27085+
27086 addr = vdso_addr(mm->start_stack, size);
27087+ addr = align_addr(addr, NULL, ALIGN_VDSO);
27088 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27089 if (IS_ERR_VALUE(addr)) {
27090 ret = addr;
27091 goto up_fail;
27092 }
27093
27094- current->mm->context.vdso = (void *)addr;
27095+ mm->context.vdso = addr;
27096
27097 ret = install_special_mapping(mm, addr, size,
27098 VM_READ|VM_EXEC|
27099 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27100 pages);
27101- if (ret) {
27102- current->mm->context.vdso = NULL;
27103- goto up_fail;
27104- }
27105+ if (ret)
27106+ mm->context.vdso = 0;
27107
27108 up_fail:
27109 up_write(&mm->mmap_sem);
27110@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 vdsox32_size);
27112 }
27113 #endif
27114-
27115-static __init int vdso_setup(char *s)
27116-{
27117- vdso_enabled = simple_strtoul(s, NULL, 0);
27118- return 0;
27119-}
27120-__setup("vdso=", vdso_setup);
27121diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27122index 40edfc3..b4d80ac 100644
27123--- a/arch/x86/xen/enlighten.c
27124+++ b/arch/x86/xen/enlighten.c
27125@@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27126
27127 struct shared_info xen_dummy_shared_info;
27128
27129-void *xen_initial_gdt;
27130-
27131 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27132 __read_mostly int xen_have_vector_callback;
27133 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27134@@ -1165,30 +1163,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27135 #endif
27136 };
27137
27138-static void xen_reboot(int reason)
27139+static __noreturn void xen_reboot(int reason)
27140 {
27141 struct sched_shutdown r = { .reason = reason };
27142
27143- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27144- BUG();
27145+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27146+ BUG();
27147 }
27148
27149-static void xen_restart(char *msg)
27150+static __noreturn void xen_restart(char *msg)
27151 {
27152 xen_reboot(SHUTDOWN_reboot);
27153 }
27154
27155-static void xen_emergency_restart(void)
27156+static __noreturn void xen_emergency_restart(void)
27157 {
27158 xen_reboot(SHUTDOWN_reboot);
27159 }
27160
27161-static void xen_machine_halt(void)
27162+static __noreturn void xen_machine_halt(void)
27163 {
27164 xen_reboot(SHUTDOWN_poweroff);
27165 }
27166
27167-static void xen_machine_power_off(void)
27168+static __noreturn void xen_machine_power_off(void)
27169 {
27170 if (pm_power_off)
27171 pm_power_off();
27172@@ -1291,7 +1289,17 @@ asmlinkage void __init xen_start_kernel(void)
27173 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27174
27175 /* Work out if we support NX */
27176- x86_configure_nx();
27177+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27178+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27179+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27180+ unsigned l, h;
27181+
27182+ __supported_pte_mask |= _PAGE_NX;
27183+ rdmsr(MSR_EFER, l, h);
27184+ l |= EFER_NX;
27185+ wrmsr(MSR_EFER, l, h);
27186+ }
27187+#endif
27188
27189 xen_setup_features();
27190
27191@@ -1322,13 +1330,6 @@ asmlinkage void __init xen_start_kernel(void)
27192
27193 machine_ops = xen_machine_ops;
27194
27195- /*
27196- * The only reliable way to retain the initial address of the
27197- * percpu gdt_page is to remember it here, so we can go and
27198- * mark it RW later, when the initial percpu area is freed.
27199- */
27200- xen_initial_gdt = &per_cpu(gdt_page, 0);
27201-
27202 xen_smp_init();
27203
27204 #ifdef CONFIG_ACPI_NUMA
27205diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27206index 69f5857..0699dc5 100644
27207--- a/arch/x86/xen/mmu.c
27208+++ b/arch/x86/xen/mmu.c
27209@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27210 convert_pfn_mfn(init_level4_pgt);
27211 convert_pfn_mfn(level3_ident_pgt);
27212 convert_pfn_mfn(level3_kernel_pgt);
27213+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27214+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27215+ convert_pfn_mfn(level3_vmemmap_pgt);
27216
27217 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27218 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27219@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27220 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27221 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27222 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27223+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27224+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27225+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27226 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27227+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27228 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27229 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27230
27231@@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27232 pv_mmu_ops.set_pud = xen_set_pud;
27233 #if PAGETABLE_LEVELS == 4
27234 pv_mmu_ops.set_pgd = xen_set_pgd;
27235+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27236 #endif
27237
27238 /* This will work as long as patching hasn't happened yet
27239@@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27240 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27241 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27242 .set_pgd = xen_set_pgd_hyper,
27243+ .set_pgd_batched = xen_set_pgd_hyper,
27244
27245 .alloc_pud = xen_alloc_pmd_init,
27246 .release_pud = xen_release_pmd_init,
27247diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27248index 0503c0c..ceb2d16 100644
27249--- a/arch/x86/xen/smp.c
27250+++ b/arch/x86/xen/smp.c
27251@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27252 {
27253 BUG_ON(smp_processor_id() != 0);
27254 native_smp_prepare_boot_cpu();
27255-
27256- /* We've switched to the "real" per-cpu gdt, so make sure the
27257- old memory can be recycled */
27258- make_lowmem_page_readwrite(xen_initial_gdt);
27259-
27260 xen_filter_cpu_maps();
27261 xen_setup_vcpu_info_placement();
27262 }
27263@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27264 gdt = get_cpu_gdt_table(cpu);
27265
27266 ctxt->flags = VGCF_IN_KERNEL;
27267- ctxt->user_regs.ds = __USER_DS;
27268- ctxt->user_regs.es = __USER_DS;
27269+ ctxt->user_regs.ds = __KERNEL_DS;
27270+ ctxt->user_regs.es = __KERNEL_DS;
27271 ctxt->user_regs.ss = __KERNEL_DS;
27272 #ifdef CONFIG_X86_32
27273 ctxt->user_regs.fs = __KERNEL_PERCPU;
27274- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27275+ savesegment(gs, ctxt->user_regs.gs);
27276 #else
27277 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27278 #endif
27279@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27280 int rc;
27281
27282 per_cpu(current_task, cpu) = idle;
27283+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27284 #ifdef CONFIG_X86_32
27285 irq_ctx_init(cpu);
27286 #else
27287 clear_tsk_thread_flag(idle, TIF_FORK);
27288- per_cpu(kernel_stack, cpu) =
27289- (unsigned long)task_stack_page(idle) -
27290- KERNEL_STACK_OFFSET + THREAD_SIZE;
27291+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27292 #endif
27293 xen_setup_runstate_info(cpu);
27294 xen_setup_timer(cpu);
27295diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27296index b040b0e..8cc4fe0 100644
27297--- a/arch/x86/xen/xen-asm_32.S
27298+++ b/arch/x86/xen/xen-asm_32.S
27299@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27300 ESP_OFFSET=4 # bytes pushed onto stack
27301
27302 /*
27303- * Store vcpu_info pointer for easy access. Do it this way to
27304- * avoid having to reload %fs
27305+ * Store vcpu_info pointer for easy access.
27306 */
27307 #ifdef CONFIG_SMP
27308- GET_THREAD_INFO(%eax)
27309- movl TI_cpu(%eax), %eax
27310- movl __per_cpu_offset(,%eax,4), %eax
27311- mov xen_vcpu(%eax), %eax
27312+ push %fs
27313+ mov $(__KERNEL_PERCPU), %eax
27314+ mov %eax, %fs
27315+ mov PER_CPU_VAR(xen_vcpu), %eax
27316+ pop %fs
27317 #else
27318 movl xen_vcpu, %eax
27319 #endif
27320diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27321index aaa7291..3f77960 100644
27322--- a/arch/x86/xen/xen-head.S
27323+++ b/arch/x86/xen/xen-head.S
27324@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27325 #ifdef CONFIG_X86_32
27326 mov %esi,xen_start_info
27327 mov $init_thread_union+THREAD_SIZE,%esp
27328+#ifdef CONFIG_SMP
27329+ movl $cpu_gdt_table,%edi
27330+ movl $__per_cpu_load,%eax
27331+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27332+ rorl $16,%eax
27333+ movb %al,__KERNEL_PERCPU + 4(%edi)
27334+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27335+ movl $__per_cpu_end - 1,%eax
27336+ subl $__per_cpu_start,%eax
27337+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27338+#endif
27339 #else
27340 mov %rsi,xen_start_info
27341 mov $init_thread_union+THREAD_SIZE,%rsp
27342diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27343index b095739..8c17bcd 100644
27344--- a/arch/x86/xen/xen-ops.h
27345+++ b/arch/x86/xen/xen-ops.h
27346@@ -10,8 +10,6 @@
27347 extern const char xen_hypervisor_callback[];
27348 extern const char xen_failsafe_callback[];
27349
27350-extern void *xen_initial_gdt;
27351-
27352 struct trap_info;
27353 void xen_copy_trap_info(struct trap_info *traps);
27354
27355diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27356index 525bd3d..ef888b1 100644
27357--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27358+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27359@@ -119,9 +119,9 @@
27360 ----------------------------------------------------------------------*/
27361
27362 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27363-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27364 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27365 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27366+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27367
27368 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27369 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27370diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27371index 2f33760..835e50a 100644
27372--- a/arch/xtensa/variants/fsf/include/variant/core.h
27373+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27374@@ -11,6 +11,7 @@
27375 #ifndef _XTENSA_CORE_H
27376 #define _XTENSA_CORE_H
27377
27378+#include <linux/const.h>
27379
27380 /****************************************************************************
27381 Parameters Useful for Any Code, USER or PRIVILEGED
27382@@ -112,9 +113,9 @@
27383 ----------------------------------------------------------------------*/
27384
27385 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27386-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27387 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27388 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27389+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27390
27391 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27392 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27393diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27394index af00795..2bb8105 100644
27395--- a/arch/xtensa/variants/s6000/include/variant/core.h
27396+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27397@@ -11,6 +11,7 @@
27398 #ifndef _XTENSA_CORE_CONFIGURATION_H
27399 #define _XTENSA_CORE_CONFIGURATION_H
27400
27401+#include <linux/const.h>
27402
27403 /****************************************************************************
27404 Parameters Useful for Any Code, USER or PRIVILEGED
27405@@ -118,9 +119,9 @@
27406 ----------------------------------------------------------------------*/
27407
27408 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27409-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27410 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27411 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27412+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27413
27414 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27415 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27416diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27417index 58916af..9cb880b 100644
27418--- a/block/blk-iopoll.c
27419+++ b/block/blk-iopoll.c
27420@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27421 }
27422 EXPORT_SYMBOL(blk_iopoll_complete);
27423
27424-static void blk_iopoll_softirq(struct softirq_action *h)
27425+static void blk_iopoll_softirq(void)
27426 {
27427 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27428 int rearm = 0, budget = blk_iopoll_budget;
27429diff --git a/block/blk-map.c b/block/blk-map.c
27430index 623e1cd..ca1e109 100644
27431--- a/block/blk-map.c
27432+++ b/block/blk-map.c
27433@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27438+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443index 467c8de..4bddc6d 100644
27444--- a/block/blk-softirq.c
27445+++ b/block/blk-softirq.c
27446@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450-static void blk_done_softirq(struct softirq_action *h)
27451+static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455diff --git a/block/bsg.c b/block/bsg.c
27456index ff64ae3..593560c 100644
27457--- a/block/bsg.c
27458+++ b/block/bsg.c
27459@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27460 struct sg_io_v4 *hdr, struct bsg_device *bd,
27461 fmode_t has_write_perm)
27462 {
27463+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27464+ unsigned char *cmdptr;
27465+
27466 if (hdr->request_len > BLK_MAX_CDB) {
27467 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27468 if (!rq->cmd)
27469 return -ENOMEM;
27470- }
27471+ cmdptr = rq->cmd;
27472+ } else
27473+ cmdptr = tmpcmd;
27474
27475- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27476+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27477 hdr->request_len))
27478 return -EFAULT;
27479
27480+ if (cmdptr != rq->cmd)
27481+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27482+
27483 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27484 if (blk_verify_command(rq->cmd, has_write_perm))
27485 return -EPERM;
27486diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27487index 7c668c8..db3521c 100644
27488--- a/block/compat_ioctl.c
27489+++ b/block/compat_ioctl.c
27490@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27491 err |= __get_user(f->spec1, &uf->spec1);
27492 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27493 err |= __get_user(name, &uf->name);
27494- f->name = compat_ptr(name);
27495+ f->name = (void __force_kernel *)compat_ptr(name);
27496 if (err) {
27497 err = -EFAULT;
27498 goto out;
27499diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27500index 6296b40..417c00f 100644
27501--- a/block/partitions/efi.c
27502+++ b/block/partitions/efi.c
27503@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27504 if (!gpt)
27505 return NULL;
27506
27507+ if (!le32_to_cpu(gpt->num_partition_entries))
27508+ return NULL;
27509+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27510+ if (!pte)
27511+ return NULL;
27512+
27513 count = le32_to_cpu(gpt->num_partition_entries) *
27514 le32_to_cpu(gpt->sizeof_partition_entry);
27515- if (!count)
27516- return NULL;
27517- pte = kzalloc(count, GFP_KERNEL);
27518- if (!pte)
27519- return NULL;
27520-
27521 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27522 (u8 *) pte,
27523 count) < count) {
27524diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27525index 260fa80..e8f3caf 100644
27526--- a/block/scsi_ioctl.c
27527+++ b/block/scsi_ioctl.c
27528@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27529 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27530 struct sg_io_hdr *hdr, fmode_t mode)
27531 {
27532- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27533+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27534+ unsigned char *cmdptr;
27535+
27536+ if (rq->cmd != rq->__cmd)
27537+ cmdptr = rq->cmd;
27538+ else
27539+ cmdptr = tmpcmd;
27540+
27541+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27542 return -EFAULT;
27543+
27544+ if (cmdptr != rq->cmd)
27545+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27546+
27547 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27548 return -EPERM;
27549
27550@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27551 int err;
27552 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27553 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27554+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27555+ unsigned char *cmdptr;
27556
27557 if (!sic)
27558 return -EINVAL;
27559@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27560 */
27561 err = -EFAULT;
27562 rq->cmd_len = cmdlen;
27563- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27564+
27565+ if (rq->cmd != rq->__cmd)
27566+ cmdptr = rq->cmd;
27567+ else
27568+ cmdptr = tmpcmd;
27569+
27570+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27571 goto error;
27572
27573+ if (rq->cmd != cmdptr)
27574+ memcpy(rq->cmd, cmdptr, cmdlen);
27575+
27576 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27577 goto error;
27578
27579diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27580index 671d4d6..5f24030 100644
27581--- a/crypto/cryptd.c
27582+++ b/crypto/cryptd.c
27583@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27584
27585 struct cryptd_blkcipher_request_ctx {
27586 crypto_completion_t complete;
27587-};
27588+} __no_const;
27589
27590 struct cryptd_hash_ctx {
27591 struct crypto_shash *child;
27592@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27593
27594 struct cryptd_aead_request_ctx {
27595 crypto_completion_t complete;
27596-};
27597+} __no_const;
27598
27599 static void cryptd_queue_worker(struct work_struct *work);
27600
27601diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27602index e6defd8..c26a225 100644
27603--- a/drivers/acpi/apei/cper.c
27604+++ b/drivers/acpi/apei/cper.c
27605@@ -38,12 +38,12 @@
27606 */
27607 u64 cper_next_record_id(void)
27608 {
27609- static atomic64_t seq;
27610+ static atomic64_unchecked_t seq;
27611
27612- if (!atomic64_read(&seq))
27613- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27614+ if (!atomic64_read_unchecked(&seq))
27615+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27616
27617- return atomic64_inc_return(&seq);
27618+ return atomic64_inc_return_unchecked(&seq);
27619 }
27620 EXPORT_SYMBOL_GPL(cper_next_record_id);
27621
27622diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27623index 7586544..636a2f0 100644
27624--- a/drivers/acpi/ec_sys.c
27625+++ b/drivers/acpi/ec_sys.c
27626@@ -12,6 +12,7 @@
27627 #include <linux/acpi.h>
27628 #include <linux/debugfs.h>
27629 #include <linux/module.h>
27630+#include <linux/uaccess.h>
27631 #include "internal.h"
27632
27633 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27634@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27635 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27636 */
27637 unsigned int size = EC_SPACE_SIZE;
27638- u8 *data = (u8 *) buf;
27639+ u8 data;
27640 loff_t init_off = *off;
27641 int err = 0;
27642
27643@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27644 size = count;
27645
27646 while (size) {
27647- err = ec_read(*off, &data[*off - init_off]);
27648+ err = ec_read(*off, &data);
27649 if (err)
27650 return err;
27651+ if (put_user(data, &buf[*off - init_off]))
27652+ return -EFAULT;
27653 *off += 1;
27654 size--;
27655 }
27656@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27657
27658 unsigned int size = count;
27659 loff_t init_off = *off;
27660- u8 *data = (u8 *) buf;
27661 int err = 0;
27662
27663 if (*off >= EC_SPACE_SIZE)
27664@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27665 }
27666
27667 while (size) {
27668- u8 byte_write = data[*off - init_off];
27669+ u8 byte_write;
27670+ if (get_user(byte_write, &buf[*off - init_off]))
27671+ return -EFAULT;
27672 err = ec_write(*off, byte_write);
27673 if (err)
27674 return err;
27675diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27676index 251c7b62..000462d 100644
27677--- a/drivers/acpi/proc.c
27678+++ b/drivers/acpi/proc.c
27679@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27680 size_t count, loff_t * ppos)
27681 {
27682 struct list_head *node, *next;
27683- char strbuf[5];
27684- char str[5] = "";
27685- unsigned int len = count;
27686+ char strbuf[5] = {0};
27687
27688- if (len > 4)
27689- len = 4;
27690- if (len < 0)
27691+ if (count > 4)
27692+ count = 4;
27693+ if (copy_from_user(strbuf, buffer, count))
27694 return -EFAULT;
27695-
27696- if (copy_from_user(strbuf, buffer, len))
27697- return -EFAULT;
27698- strbuf[len] = '\0';
27699- sscanf(strbuf, "%s", str);
27700+ strbuf[count] = '\0';
27701
27702 mutex_lock(&acpi_device_lock);
27703 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27704@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27705 if (!dev->wakeup.flags.valid)
27706 continue;
27707
27708- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27709+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27710 if (device_can_wakeup(&dev->dev)) {
27711 bool enable = !device_may_wakeup(&dev->dev);
27712 device_set_wakeup_enable(&dev->dev, enable);
27713diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27714index 0734086..3ad3e4c 100644
27715--- a/drivers/acpi/processor_driver.c
27716+++ b/drivers/acpi/processor_driver.c
27717@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27718 return 0;
27719 #endif
27720
27721- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27722+ BUG_ON(pr->id >= nr_cpu_ids);
27723
27724 /*
27725 * Buggy BIOS check
27726diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27727index d31ee55..8363a8b 100644
27728--- a/drivers/ata/libata-core.c
27729+++ b/drivers/ata/libata-core.c
27730@@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27731 struct ata_port *ap;
27732 unsigned int tag;
27733
27734- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27735+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27736 ap = qc->ap;
27737
27738 qc->flags = 0;
27739@@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27740 struct ata_port *ap;
27741 struct ata_link *link;
27742
27743- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27744+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27745 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27746 ap = qc->ap;
27747 link = qc->dev->link;
27748@@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27749 return;
27750
27751 spin_lock(&lock);
27752+ pax_open_kernel();
27753
27754 for (cur = ops->inherits; cur; cur = cur->inherits) {
27755 void **inherit = (void **)cur;
27756@@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27757 if (IS_ERR(*pp))
27758 *pp = NULL;
27759
27760- ops->inherits = NULL;
27761+ *(struct ata_port_operations **)&ops->inherits = NULL;
27762
27763+ pax_close_kernel();
27764 spin_unlock(&lock);
27765 }
27766
27767diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27768index 3239517..343b5f6 100644
27769--- a/drivers/ata/pata_arasan_cf.c
27770+++ b/drivers/ata/pata_arasan_cf.c
27771@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27772 /* Handle platform specific quirks */
27773 if (pdata->quirk) {
27774 if (pdata->quirk & CF_BROKEN_PIO) {
27775- ap->ops->set_piomode = NULL;
27776+ pax_open_kernel();
27777+ *(void **)&ap->ops->set_piomode = NULL;
27778+ pax_close_kernel();
27779 ap->pio_mask = 0;
27780 }
27781 if (pdata->quirk & CF_BROKEN_MWDMA)
27782diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27783index f9b983a..887b9d8 100644
27784--- a/drivers/atm/adummy.c
27785+++ b/drivers/atm/adummy.c
27786@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27787 vcc->pop(vcc, skb);
27788 else
27789 dev_kfree_skb_any(skb);
27790- atomic_inc(&vcc->stats->tx);
27791+ atomic_inc_unchecked(&vcc->stats->tx);
27792
27793 return 0;
27794 }
27795diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27796index f8f41e0..1f987dd 100644
27797--- a/drivers/atm/ambassador.c
27798+++ b/drivers/atm/ambassador.c
27799@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27800 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27801
27802 // VC layer stats
27803- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27804+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27805
27806 // free the descriptor
27807 kfree (tx_descr);
27808@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27809 dump_skb ("<<<", vc, skb);
27810
27811 // VC layer stats
27812- atomic_inc(&atm_vcc->stats->rx);
27813+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27814 __net_timestamp(skb);
27815 // end of our responsibility
27816 atm_vcc->push (atm_vcc, skb);
27817@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27818 } else {
27819 PRINTK (KERN_INFO, "dropped over-size frame");
27820 // should we count this?
27821- atomic_inc(&atm_vcc->stats->rx_drop);
27822+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27823 }
27824
27825 } else {
27826@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27827 }
27828
27829 if (check_area (skb->data, skb->len)) {
27830- atomic_inc(&atm_vcc->stats->tx_err);
27831+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27832 return -ENOMEM; // ?
27833 }
27834
27835diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27836index b22d71c..d6e1049 100644
27837--- a/drivers/atm/atmtcp.c
27838+++ b/drivers/atm/atmtcp.c
27839@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27840 if (vcc->pop) vcc->pop(vcc,skb);
27841 else dev_kfree_skb(skb);
27842 if (dev_data) return 0;
27843- atomic_inc(&vcc->stats->tx_err);
27844+ atomic_inc_unchecked(&vcc->stats->tx_err);
27845 return -ENOLINK;
27846 }
27847 size = skb->len+sizeof(struct atmtcp_hdr);
27848@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27849 if (!new_skb) {
27850 if (vcc->pop) vcc->pop(vcc,skb);
27851 else dev_kfree_skb(skb);
27852- atomic_inc(&vcc->stats->tx_err);
27853+ atomic_inc_unchecked(&vcc->stats->tx_err);
27854 return -ENOBUFS;
27855 }
27856 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27857@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27858 if (vcc->pop) vcc->pop(vcc,skb);
27859 else dev_kfree_skb(skb);
27860 out_vcc->push(out_vcc,new_skb);
27861- atomic_inc(&vcc->stats->tx);
27862- atomic_inc(&out_vcc->stats->rx);
27863+ atomic_inc_unchecked(&vcc->stats->tx);
27864+ atomic_inc_unchecked(&out_vcc->stats->rx);
27865 return 0;
27866 }
27867
27868@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27869 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27870 read_unlock(&vcc_sklist_lock);
27871 if (!out_vcc) {
27872- atomic_inc(&vcc->stats->tx_err);
27873+ atomic_inc_unchecked(&vcc->stats->tx_err);
27874 goto done;
27875 }
27876 skb_pull(skb,sizeof(struct atmtcp_hdr));
27877@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27878 __net_timestamp(new_skb);
27879 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27880 out_vcc->push(out_vcc,new_skb);
27881- atomic_inc(&vcc->stats->tx);
27882- atomic_inc(&out_vcc->stats->rx);
27883+ atomic_inc_unchecked(&vcc->stats->tx);
27884+ atomic_inc_unchecked(&out_vcc->stats->rx);
27885 done:
27886 if (vcc->pop) vcc->pop(vcc,skb);
27887 else dev_kfree_skb(skb);
27888diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27889index 2059ee4..faf51c7 100644
27890--- a/drivers/atm/eni.c
27891+++ b/drivers/atm/eni.c
27892@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27893 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27894 vcc->dev->number);
27895 length = 0;
27896- atomic_inc(&vcc->stats->rx_err);
27897+ atomic_inc_unchecked(&vcc->stats->rx_err);
27898 }
27899 else {
27900 length = ATM_CELL_SIZE-1; /* no HEC */
27901@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27902 size);
27903 }
27904 eff = length = 0;
27905- atomic_inc(&vcc->stats->rx_err);
27906+ atomic_inc_unchecked(&vcc->stats->rx_err);
27907 }
27908 else {
27909 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27910@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27911 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27912 vcc->dev->number,vcc->vci,length,size << 2,descr);
27913 length = eff = 0;
27914- atomic_inc(&vcc->stats->rx_err);
27915+ atomic_inc_unchecked(&vcc->stats->rx_err);
27916 }
27917 }
27918 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27919@@ -767,7 +767,7 @@ rx_dequeued++;
27920 vcc->push(vcc,skb);
27921 pushed++;
27922 }
27923- atomic_inc(&vcc->stats->rx);
27924+ atomic_inc_unchecked(&vcc->stats->rx);
27925 }
27926 wake_up(&eni_dev->rx_wait);
27927 }
27928@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27929 PCI_DMA_TODEVICE);
27930 if (vcc->pop) vcc->pop(vcc,skb);
27931 else dev_kfree_skb_irq(skb);
27932- atomic_inc(&vcc->stats->tx);
27933+ atomic_inc_unchecked(&vcc->stats->tx);
27934 wake_up(&eni_dev->tx_wait);
27935 dma_complete++;
27936 }
27937@@ -1567,7 +1567,7 @@ tx_complete++;
27938 /*--------------------------------- entries ---------------------------------*/
27939
27940
27941-static const char *media_name[] __devinitdata = {
27942+static const char *media_name[] __devinitconst = {
27943 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27944 "UTP", "05?", "06?", "07?", /* 4- 7 */
27945 "TAXI","09?", "10?", "11?", /* 8-11 */
27946diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27947index 86fed1b..6dc4721 100644
27948--- a/drivers/atm/firestream.c
27949+++ b/drivers/atm/firestream.c
27950@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27951 }
27952 }
27953
27954- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27955+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27956
27957 fs_dprintk (FS_DEBUG_TXMEM, "i");
27958 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27959@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27960 #endif
27961 skb_put (skb, qe->p1 & 0xffff);
27962 ATM_SKB(skb)->vcc = atm_vcc;
27963- atomic_inc(&atm_vcc->stats->rx);
27964+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27965 __net_timestamp(skb);
27966 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27967 atm_vcc->push (atm_vcc, skb);
27968@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27969 kfree (pe);
27970 }
27971 if (atm_vcc)
27972- atomic_inc(&atm_vcc->stats->rx_drop);
27973+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27974 break;
27975 case 0x1f: /* Reassembly abort: no buffers. */
27976 /* Silently increment error counter. */
27977 if (atm_vcc)
27978- atomic_inc(&atm_vcc->stats->rx_drop);
27979+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27980 break;
27981 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27982 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27983diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27984index 361f5ae..7fc552d 100644
27985--- a/drivers/atm/fore200e.c
27986+++ b/drivers/atm/fore200e.c
27987@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27988 #endif
27989 /* check error condition */
27990 if (*entry->status & STATUS_ERROR)
27991- atomic_inc(&vcc->stats->tx_err);
27992+ atomic_inc_unchecked(&vcc->stats->tx_err);
27993 else
27994- atomic_inc(&vcc->stats->tx);
27995+ atomic_inc_unchecked(&vcc->stats->tx);
27996 }
27997 }
27998
27999@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28000 if (skb == NULL) {
28001 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28002
28003- atomic_inc(&vcc->stats->rx_drop);
28004+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28005 return -ENOMEM;
28006 }
28007
28008@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28009
28010 dev_kfree_skb_any(skb);
28011
28012- atomic_inc(&vcc->stats->rx_drop);
28013+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28014 return -ENOMEM;
28015 }
28016
28017 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28018
28019 vcc->push(vcc, skb);
28020- atomic_inc(&vcc->stats->rx);
28021+ atomic_inc_unchecked(&vcc->stats->rx);
28022
28023 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28024
28025@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28026 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28027 fore200e->atm_dev->number,
28028 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28029- atomic_inc(&vcc->stats->rx_err);
28030+ atomic_inc_unchecked(&vcc->stats->rx_err);
28031 }
28032 }
28033
28034@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28035 goto retry_here;
28036 }
28037
28038- atomic_inc(&vcc->stats->tx_err);
28039+ atomic_inc_unchecked(&vcc->stats->tx_err);
28040
28041 fore200e->tx_sat++;
28042 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28043diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28044index b182c2f..1c6fa8a 100644
28045--- a/drivers/atm/he.c
28046+++ b/drivers/atm/he.c
28047@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28048
28049 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28050 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28051- atomic_inc(&vcc->stats->rx_drop);
28052+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28053 goto return_host_buffers;
28054 }
28055
28056@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28057 RBRQ_LEN_ERR(he_dev->rbrq_head)
28058 ? "LEN_ERR" : "",
28059 vcc->vpi, vcc->vci);
28060- atomic_inc(&vcc->stats->rx_err);
28061+ atomic_inc_unchecked(&vcc->stats->rx_err);
28062 goto return_host_buffers;
28063 }
28064
28065@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28066 vcc->push(vcc, skb);
28067 spin_lock(&he_dev->global_lock);
28068
28069- atomic_inc(&vcc->stats->rx);
28070+ atomic_inc_unchecked(&vcc->stats->rx);
28071
28072 return_host_buffers:
28073 ++pdus_assembled;
28074@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28075 tpd->vcc->pop(tpd->vcc, tpd->skb);
28076 else
28077 dev_kfree_skb_any(tpd->skb);
28078- atomic_inc(&tpd->vcc->stats->tx_err);
28079+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28080 }
28081 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28082 return;
28083@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28084 vcc->pop(vcc, skb);
28085 else
28086 dev_kfree_skb_any(skb);
28087- atomic_inc(&vcc->stats->tx_err);
28088+ atomic_inc_unchecked(&vcc->stats->tx_err);
28089 return -EINVAL;
28090 }
28091
28092@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28093 vcc->pop(vcc, skb);
28094 else
28095 dev_kfree_skb_any(skb);
28096- atomic_inc(&vcc->stats->tx_err);
28097+ atomic_inc_unchecked(&vcc->stats->tx_err);
28098 return -EINVAL;
28099 }
28100 #endif
28101@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28102 vcc->pop(vcc, skb);
28103 else
28104 dev_kfree_skb_any(skb);
28105- atomic_inc(&vcc->stats->tx_err);
28106+ atomic_inc_unchecked(&vcc->stats->tx_err);
28107 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28108 return -ENOMEM;
28109 }
28110@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28111 vcc->pop(vcc, skb);
28112 else
28113 dev_kfree_skb_any(skb);
28114- atomic_inc(&vcc->stats->tx_err);
28115+ atomic_inc_unchecked(&vcc->stats->tx_err);
28116 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28117 return -ENOMEM;
28118 }
28119@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28120 __enqueue_tpd(he_dev, tpd, cid);
28121 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28122
28123- atomic_inc(&vcc->stats->tx);
28124+ atomic_inc_unchecked(&vcc->stats->tx);
28125
28126 return 0;
28127 }
28128diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28129index 75fd691..2d20b14 100644
28130--- a/drivers/atm/horizon.c
28131+++ b/drivers/atm/horizon.c
28132@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28133 {
28134 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28135 // VC layer stats
28136- atomic_inc(&vcc->stats->rx);
28137+ atomic_inc_unchecked(&vcc->stats->rx);
28138 __net_timestamp(skb);
28139 // end of our responsibility
28140 vcc->push (vcc, skb);
28141@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28142 dev->tx_iovec = NULL;
28143
28144 // VC layer stats
28145- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28146+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28147
28148 // free the skb
28149 hrz_kfree_skb (skb);
28150diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28151index 1c05212..c28e200 100644
28152--- a/drivers/atm/idt77252.c
28153+++ b/drivers/atm/idt77252.c
28154@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28155 else
28156 dev_kfree_skb(skb);
28157
28158- atomic_inc(&vcc->stats->tx);
28159+ atomic_inc_unchecked(&vcc->stats->tx);
28160 }
28161
28162 atomic_dec(&scq->used);
28163@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28164 if ((sb = dev_alloc_skb(64)) == NULL) {
28165 printk("%s: Can't allocate buffers for aal0.\n",
28166 card->name);
28167- atomic_add(i, &vcc->stats->rx_drop);
28168+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28169 break;
28170 }
28171 if (!atm_charge(vcc, sb->truesize)) {
28172 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28173 card->name);
28174- atomic_add(i - 1, &vcc->stats->rx_drop);
28175+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28176 dev_kfree_skb(sb);
28177 break;
28178 }
28179@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28180 ATM_SKB(sb)->vcc = vcc;
28181 __net_timestamp(sb);
28182 vcc->push(vcc, sb);
28183- atomic_inc(&vcc->stats->rx);
28184+ atomic_inc_unchecked(&vcc->stats->rx);
28185
28186 cell += ATM_CELL_PAYLOAD;
28187 }
28188@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28189 "(CDC: %08x)\n",
28190 card->name, len, rpp->len, readl(SAR_REG_CDC));
28191 recycle_rx_pool_skb(card, rpp);
28192- atomic_inc(&vcc->stats->rx_err);
28193+ atomic_inc_unchecked(&vcc->stats->rx_err);
28194 return;
28195 }
28196 if (stat & SAR_RSQE_CRC) {
28197 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28198 recycle_rx_pool_skb(card, rpp);
28199- atomic_inc(&vcc->stats->rx_err);
28200+ atomic_inc_unchecked(&vcc->stats->rx_err);
28201 return;
28202 }
28203 if (skb_queue_len(&rpp->queue) > 1) {
28204@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28205 RXPRINTK("%s: Can't alloc RX skb.\n",
28206 card->name);
28207 recycle_rx_pool_skb(card, rpp);
28208- atomic_inc(&vcc->stats->rx_err);
28209+ atomic_inc_unchecked(&vcc->stats->rx_err);
28210 return;
28211 }
28212 if (!atm_charge(vcc, skb->truesize)) {
28213@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28214 __net_timestamp(skb);
28215
28216 vcc->push(vcc, skb);
28217- atomic_inc(&vcc->stats->rx);
28218+ atomic_inc_unchecked(&vcc->stats->rx);
28219
28220 return;
28221 }
28222@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28223 __net_timestamp(skb);
28224
28225 vcc->push(vcc, skb);
28226- atomic_inc(&vcc->stats->rx);
28227+ atomic_inc_unchecked(&vcc->stats->rx);
28228
28229 if (skb->truesize > SAR_FB_SIZE_3)
28230 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28231@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28232 if (vcc->qos.aal != ATM_AAL0) {
28233 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28234 card->name, vpi, vci);
28235- atomic_inc(&vcc->stats->rx_drop);
28236+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28237 goto drop;
28238 }
28239
28240 if ((sb = dev_alloc_skb(64)) == NULL) {
28241 printk("%s: Can't allocate buffers for AAL0.\n",
28242 card->name);
28243- atomic_inc(&vcc->stats->rx_err);
28244+ atomic_inc_unchecked(&vcc->stats->rx_err);
28245 goto drop;
28246 }
28247
28248@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28249 ATM_SKB(sb)->vcc = vcc;
28250 __net_timestamp(sb);
28251 vcc->push(vcc, sb);
28252- atomic_inc(&vcc->stats->rx);
28253+ atomic_inc_unchecked(&vcc->stats->rx);
28254
28255 drop:
28256 skb_pull(queue, 64);
28257@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28258
28259 if (vc == NULL) {
28260 printk("%s: NULL connection in send().\n", card->name);
28261- atomic_inc(&vcc->stats->tx_err);
28262+ atomic_inc_unchecked(&vcc->stats->tx_err);
28263 dev_kfree_skb(skb);
28264 return -EINVAL;
28265 }
28266 if (!test_bit(VCF_TX, &vc->flags)) {
28267 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28268- atomic_inc(&vcc->stats->tx_err);
28269+ atomic_inc_unchecked(&vcc->stats->tx_err);
28270 dev_kfree_skb(skb);
28271 return -EINVAL;
28272 }
28273@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28274 break;
28275 default:
28276 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28277- atomic_inc(&vcc->stats->tx_err);
28278+ atomic_inc_unchecked(&vcc->stats->tx_err);
28279 dev_kfree_skb(skb);
28280 return -EINVAL;
28281 }
28282
28283 if (skb_shinfo(skb)->nr_frags != 0) {
28284 printk("%s: No scatter-gather yet.\n", card->name);
28285- atomic_inc(&vcc->stats->tx_err);
28286+ atomic_inc_unchecked(&vcc->stats->tx_err);
28287 dev_kfree_skb(skb);
28288 return -EINVAL;
28289 }
28290@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28291
28292 err = queue_skb(card, vc, skb, oam);
28293 if (err) {
28294- atomic_inc(&vcc->stats->tx_err);
28295+ atomic_inc_unchecked(&vcc->stats->tx_err);
28296 dev_kfree_skb(skb);
28297 return err;
28298 }
28299@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28300 skb = dev_alloc_skb(64);
28301 if (!skb) {
28302 printk("%s: Out of memory in send_oam().\n", card->name);
28303- atomic_inc(&vcc->stats->tx_err);
28304+ atomic_inc_unchecked(&vcc->stats->tx_err);
28305 return -ENOMEM;
28306 }
28307 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28308diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28309index d438601..8b98495 100644
28310--- a/drivers/atm/iphase.c
28311+++ b/drivers/atm/iphase.c
28312@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28313 status = (u_short) (buf_desc_ptr->desc_mode);
28314 if (status & (RX_CER | RX_PTE | RX_OFL))
28315 {
28316- atomic_inc(&vcc->stats->rx_err);
28317+ atomic_inc_unchecked(&vcc->stats->rx_err);
28318 IF_ERR(printk("IA: bad packet, dropping it");)
28319 if (status & RX_CER) {
28320 IF_ERR(printk(" cause: packet CRC error\n");)
28321@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28322 len = dma_addr - buf_addr;
28323 if (len > iadev->rx_buf_sz) {
28324 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28325- atomic_inc(&vcc->stats->rx_err);
28326+ atomic_inc_unchecked(&vcc->stats->rx_err);
28327 goto out_free_desc;
28328 }
28329
28330@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28331 ia_vcc = INPH_IA_VCC(vcc);
28332 if (ia_vcc == NULL)
28333 {
28334- atomic_inc(&vcc->stats->rx_err);
28335+ atomic_inc_unchecked(&vcc->stats->rx_err);
28336 atm_return(vcc, skb->truesize);
28337 dev_kfree_skb_any(skb);
28338 goto INCR_DLE;
28339@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28340 if ((length > iadev->rx_buf_sz) || (length >
28341 (skb->len - sizeof(struct cpcs_trailer))))
28342 {
28343- atomic_inc(&vcc->stats->rx_err);
28344+ atomic_inc_unchecked(&vcc->stats->rx_err);
28345 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28346 length, skb->len);)
28347 atm_return(vcc, skb->truesize);
28348@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28349
28350 IF_RX(printk("rx_dle_intr: skb push");)
28351 vcc->push(vcc,skb);
28352- atomic_inc(&vcc->stats->rx);
28353+ atomic_inc_unchecked(&vcc->stats->rx);
28354 iadev->rx_pkt_cnt++;
28355 }
28356 INCR_DLE:
28357@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28358 {
28359 struct k_sonet_stats *stats;
28360 stats = &PRIV(_ia_dev[board])->sonet_stats;
28361- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28362- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28363- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28364- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28365- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28366- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28367- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28368- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28369- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28370+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28371+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28372+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28373+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28374+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28375+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28376+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28377+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28378+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28379 }
28380 ia_cmds.status = 0;
28381 break;
28382@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28383 if ((desc == 0) || (desc > iadev->num_tx_desc))
28384 {
28385 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28386- atomic_inc(&vcc->stats->tx);
28387+ atomic_inc_unchecked(&vcc->stats->tx);
28388 if (vcc->pop)
28389 vcc->pop(vcc, skb);
28390 else
28391@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28392 ATM_DESC(skb) = vcc->vci;
28393 skb_queue_tail(&iadev->tx_dma_q, skb);
28394
28395- atomic_inc(&vcc->stats->tx);
28396+ atomic_inc_unchecked(&vcc->stats->tx);
28397 iadev->tx_pkt_cnt++;
28398 /* Increment transaction counter */
28399 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28400
28401 #if 0
28402 /* add flow control logic */
28403- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28404+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28405 if (iavcc->vc_desc_cnt > 10) {
28406 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28407 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28408diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28409index 68c7588..7036683 100644
28410--- a/drivers/atm/lanai.c
28411+++ b/drivers/atm/lanai.c
28412@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28413 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28414 lanai_endtx(lanai, lvcc);
28415 lanai_free_skb(lvcc->tx.atmvcc, skb);
28416- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28417+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28418 }
28419
28420 /* Try to fill the buffer - don't call unless there is backlog */
28421@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28422 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28423 __net_timestamp(skb);
28424 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28425- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28426+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28427 out:
28428 lvcc->rx.buf.ptr = end;
28429 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28430@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28431 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28432 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28433 lanai->stats.service_rxnotaal5++;
28434- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28435+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28436 return 0;
28437 }
28438 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28439@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28440 int bytes;
28441 read_unlock(&vcc_sklist_lock);
28442 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28443- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28444+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28445 lvcc->stats.x.aal5.service_trash++;
28446 bytes = (SERVICE_GET_END(s) * 16) -
28447 (((unsigned long) lvcc->rx.buf.ptr) -
28448@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28449 }
28450 if (s & SERVICE_STREAM) {
28451 read_unlock(&vcc_sklist_lock);
28452- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28453+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28454 lvcc->stats.x.aal5.service_stream++;
28455 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28456 "PDU on VCI %d!\n", lanai->number, vci);
28457@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28458 return 0;
28459 }
28460 DPRINTK("got rx crc error on vci %d\n", vci);
28461- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28462+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28463 lvcc->stats.x.aal5.service_rxcrc++;
28464 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28465 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28466diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28467index 1c70c45..300718d 100644
28468--- a/drivers/atm/nicstar.c
28469+++ b/drivers/atm/nicstar.c
28470@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28471 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28472 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28473 card->index);
28474- atomic_inc(&vcc->stats->tx_err);
28475+ atomic_inc_unchecked(&vcc->stats->tx_err);
28476 dev_kfree_skb_any(skb);
28477 return -EINVAL;
28478 }
28479@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28480 if (!vc->tx) {
28481 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28482 card->index);
28483- atomic_inc(&vcc->stats->tx_err);
28484+ atomic_inc_unchecked(&vcc->stats->tx_err);
28485 dev_kfree_skb_any(skb);
28486 return -EINVAL;
28487 }
28488@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28489 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28490 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28491 card->index);
28492- atomic_inc(&vcc->stats->tx_err);
28493+ atomic_inc_unchecked(&vcc->stats->tx_err);
28494 dev_kfree_skb_any(skb);
28495 return -EINVAL;
28496 }
28497
28498 if (skb_shinfo(skb)->nr_frags != 0) {
28499 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28500- atomic_inc(&vcc->stats->tx_err);
28501+ atomic_inc_unchecked(&vcc->stats->tx_err);
28502 dev_kfree_skb_any(skb);
28503 return -EINVAL;
28504 }
28505@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28506 }
28507
28508 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28509- atomic_inc(&vcc->stats->tx_err);
28510+ atomic_inc_unchecked(&vcc->stats->tx_err);
28511 dev_kfree_skb_any(skb);
28512 return -EIO;
28513 }
28514- atomic_inc(&vcc->stats->tx);
28515+ atomic_inc_unchecked(&vcc->stats->tx);
28516
28517 return 0;
28518 }
28519@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28520 printk
28521 ("nicstar%d: Can't allocate buffers for aal0.\n",
28522 card->index);
28523- atomic_add(i, &vcc->stats->rx_drop);
28524+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28525 break;
28526 }
28527 if (!atm_charge(vcc, sb->truesize)) {
28528 RXPRINTK
28529 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28530 card->index);
28531- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28532+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28533 dev_kfree_skb_any(sb);
28534 break;
28535 }
28536@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28537 ATM_SKB(sb)->vcc = vcc;
28538 __net_timestamp(sb);
28539 vcc->push(vcc, sb);
28540- atomic_inc(&vcc->stats->rx);
28541+ atomic_inc_unchecked(&vcc->stats->rx);
28542 cell += ATM_CELL_PAYLOAD;
28543 }
28544
28545@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28546 if (iovb == NULL) {
28547 printk("nicstar%d: Out of iovec buffers.\n",
28548 card->index);
28549- atomic_inc(&vcc->stats->rx_drop);
28550+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28551 recycle_rx_buf(card, skb);
28552 return;
28553 }
28554@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28555 small or large buffer itself. */
28556 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28557 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28558- atomic_inc(&vcc->stats->rx_err);
28559+ atomic_inc_unchecked(&vcc->stats->rx_err);
28560 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28561 NS_MAX_IOVECS);
28562 NS_PRV_IOVCNT(iovb) = 0;
28563@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28564 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28565 card->index);
28566 which_list(card, skb);
28567- atomic_inc(&vcc->stats->rx_err);
28568+ atomic_inc_unchecked(&vcc->stats->rx_err);
28569 recycle_rx_buf(card, skb);
28570 vc->rx_iov = NULL;
28571 recycle_iov_buf(card, iovb);
28572@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28573 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28574 card->index);
28575 which_list(card, skb);
28576- atomic_inc(&vcc->stats->rx_err);
28577+ atomic_inc_unchecked(&vcc->stats->rx_err);
28578 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28579 NS_PRV_IOVCNT(iovb));
28580 vc->rx_iov = NULL;
28581@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28582 printk(" - PDU size mismatch.\n");
28583 else
28584 printk(".\n");
28585- atomic_inc(&vcc->stats->rx_err);
28586+ atomic_inc_unchecked(&vcc->stats->rx_err);
28587 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28588 NS_PRV_IOVCNT(iovb));
28589 vc->rx_iov = NULL;
28590@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28591 /* skb points to a small buffer */
28592 if (!atm_charge(vcc, skb->truesize)) {
28593 push_rxbufs(card, skb);
28594- atomic_inc(&vcc->stats->rx_drop);
28595+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28596 } else {
28597 skb_put(skb, len);
28598 dequeue_sm_buf(card, skb);
28599@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28600 ATM_SKB(skb)->vcc = vcc;
28601 __net_timestamp(skb);
28602 vcc->push(vcc, skb);
28603- atomic_inc(&vcc->stats->rx);
28604+ atomic_inc_unchecked(&vcc->stats->rx);
28605 }
28606 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28607 struct sk_buff *sb;
28608@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28609 if (len <= NS_SMBUFSIZE) {
28610 if (!atm_charge(vcc, sb->truesize)) {
28611 push_rxbufs(card, sb);
28612- atomic_inc(&vcc->stats->rx_drop);
28613+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28614 } else {
28615 skb_put(sb, len);
28616 dequeue_sm_buf(card, sb);
28617@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28618 ATM_SKB(sb)->vcc = vcc;
28619 __net_timestamp(sb);
28620 vcc->push(vcc, sb);
28621- atomic_inc(&vcc->stats->rx);
28622+ atomic_inc_unchecked(&vcc->stats->rx);
28623 }
28624
28625 push_rxbufs(card, skb);
28626@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28627
28628 if (!atm_charge(vcc, skb->truesize)) {
28629 push_rxbufs(card, skb);
28630- atomic_inc(&vcc->stats->rx_drop);
28631+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28632 } else {
28633 dequeue_lg_buf(card, skb);
28634 #ifdef NS_USE_DESTRUCTORS
28635@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28636 ATM_SKB(skb)->vcc = vcc;
28637 __net_timestamp(skb);
28638 vcc->push(vcc, skb);
28639- atomic_inc(&vcc->stats->rx);
28640+ atomic_inc_unchecked(&vcc->stats->rx);
28641 }
28642
28643 push_rxbufs(card, sb);
28644@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28645 printk
28646 ("nicstar%d: Out of huge buffers.\n",
28647 card->index);
28648- atomic_inc(&vcc->stats->rx_drop);
28649+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28650 recycle_iovec_rx_bufs(card,
28651 (struct iovec *)
28652 iovb->data,
28653@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28654 card->hbpool.count++;
28655 } else
28656 dev_kfree_skb_any(hb);
28657- atomic_inc(&vcc->stats->rx_drop);
28658+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28659 } else {
28660 /* Copy the small buffer to the huge buffer */
28661 sb = (struct sk_buff *)iov->iov_base;
28662@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28663 #endif /* NS_USE_DESTRUCTORS */
28664 __net_timestamp(hb);
28665 vcc->push(vcc, hb);
28666- atomic_inc(&vcc->stats->rx);
28667+ atomic_inc_unchecked(&vcc->stats->rx);
28668 }
28669 }
28670
28671diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28672index 9851093..adb2b1e 100644
28673--- a/drivers/atm/solos-pci.c
28674+++ b/drivers/atm/solos-pci.c
28675@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28676 }
28677 atm_charge(vcc, skb->truesize);
28678 vcc->push(vcc, skb);
28679- atomic_inc(&vcc->stats->rx);
28680+ atomic_inc_unchecked(&vcc->stats->rx);
28681 break;
28682
28683 case PKT_STATUS:
28684@@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28685 vcc = SKB_CB(oldskb)->vcc;
28686
28687 if (vcc) {
28688- atomic_inc(&vcc->stats->tx);
28689+ atomic_inc_unchecked(&vcc->stats->tx);
28690 solos_pop(vcc, oldskb);
28691 } else
28692 dev_kfree_skb_irq(oldskb);
28693diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28694index 0215934..ce9f5b1 100644
28695--- a/drivers/atm/suni.c
28696+++ b/drivers/atm/suni.c
28697@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28698
28699
28700 #define ADD_LIMITED(s,v) \
28701- atomic_add((v),&stats->s); \
28702- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28703+ atomic_add_unchecked((v),&stats->s); \
28704+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28705
28706
28707 static void suni_hz(unsigned long from_timer)
28708diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28709index 5120a96..e2572bd 100644
28710--- a/drivers/atm/uPD98402.c
28711+++ b/drivers/atm/uPD98402.c
28712@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28713 struct sonet_stats tmp;
28714 int error = 0;
28715
28716- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28717+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28718 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28719 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28720 if (zero && !error) {
28721@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28722
28723
28724 #define ADD_LIMITED(s,v) \
28725- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28726- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28727- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28728+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28729+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28730+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28731
28732
28733 static void stat_event(struct atm_dev *dev)
28734@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28735 if (reason & uPD98402_INT_PFM) stat_event(dev);
28736 if (reason & uPD98402_INT_PCO) {
28737 (void) GET(PCOCR); /* clear interrupt cause */
28738- atomic_add(GET(HECCT),
28739+ atomic_add_unchecked(GET(HECCT),
28740 &PRIV(dev)->sonet_stats.uncorr_hcs);
28741 }
28742 if ((reason & uPD98402_INT_RFO) &&
28743@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28744 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28745 uPD98402_INT_LOS),PIMR); /* enable them */
28746 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28747- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28748- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28749- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28750+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28751+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28752+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28753 return 0;
28754 }
28755
28756diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28757index abe4e20..83c4727 100644
28758--- a/drivers/atm/zatm.c
28759+++ b/drivers/atm/zatm.c
28760@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28761 }
28762 if (!size) {
28763 dev_kfree_skb_irq(skb);
28764- if (vcc) atomic_inc(&vcc->stats->rx_err);
28765+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28766 continue;
28767 }
28768 if (!atm_charge(vcc,skb->truesize)) {
28769@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28770 skb->len = size;
28771 ATM_SKB(skb)->vcc = vcc;
28772 vcc->push(vcc,skb);
28773- atomic_inc(&vcc->stats->rx);
28774+ atomic_inc_unchecked(&vcc->stats->rx);
28775 }
28776 zout(pos & 0xffff,MTA(mbx));
28777 #if 0 /* probably a stupid idea */
28778@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28779 skb_queue_head(&zatm_vcc->backlog,skb);
28780 break;
28781 }
28782- atomic_inc(&vcc->stats->tx);
28783+ atomic_inc_unchecked(&vcc->stats->tx);
28784 wake_up(&zatm_vcc->tx_wait);
28785 }
28786
28787diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28788index 8493536..31adee0 100644
28789--- a/drivers/base/devtmpfs.c
28790+++ b/drivers/base/devtmpfs.c
28791@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28792 if (!thread)
28793 return 0;
28794
28795- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28796+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28797 if (err)
28798 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28799 else
28800diff --git a/drivers/base/node.c b/drivers/base/node.c
28801index 90aa2a1..af1a177 100644
28802--- a/drivers/base/node.c
28803+++ b/drivers/base/node.c
28804@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28805 {
28806 int n;
28807
28808- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28809- if (n > 0 && PAGE_SIZE > n + 1) {
28810- *(buf + n++) = '\n';
28811- *(buf + n++) = '\0';
28812- }
28813+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28814+ buf[n++] = '\n';
28815+ buf[n] = '\0';
28816 return n;
28817 }
28818
28819diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28820index 2a3e581..3d6a73f 100644
28821--- a/drivers/base/power/wakeup.c
28822+++ b/drivers/base/power/wakeup.c
28823@@ -30,14 +30,14 @@ bool events_check_enabled;
28824 * They need to be modified together atomically, so it's better to use one
28825 * atomic variable to hold them both.
28826 */
28827-static atomic_t combined_event_count = ATOMIC_INIT(0);
28828+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28829
28830 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28831 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28832
28833 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28834 {
28835- unsigned int comb = atomic_read(&combined_event_count);
28836+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28837
28838 *cnt = (comb >> IN_PROGRESS_BITS);
28839 *inpr = comb & MAX_IN_PROGRESS;
28840@@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28841 ws->last_time = ktime_get();
28842
28843 /* Increment the counter of events in progress. */
28844- atomic_inc(&combined_event_count);
28845+ atomic_inc_unchecked(&combined_event_count);
28846 }
28847
28848 /**
28849@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28850 * Increment the counter of registered wakeup events and decrement the
28851 * couter of wakeup events in progress simultaneously.
28852 */
28853- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28854+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28855 }
28856
28857 /**
28858diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28859index b0f553b..77b928b 100644
28860--- a/drivers/block/cciss.c
28861+++ b/drivers/block/cciss.c
28862@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28863 int err;
28864 u32 cp;
28865
28866+ memset(&arg64, 0, sizeof(arg64));
28867+
28868 err = 0;
28869 err |=
28870 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28871@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28872 while (!list_empty(&h->reqQ)) {
28873 c = list_entry(h->reqQ.next, CommandList_struct, list);
28874 /* can't do anything if fifo is full */
28875- if ((h->access.fifo_full(h))) {
28876+ if ((h->access->fifo_full(h))) {
28877 dev_warn(&h->pdev->dev, "fifo full\n");
28878 break;
28879 }
28880@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28881 h->Qdepth--;
28882
28883 /* Tell the controller execute command */
28884- h->access.submit_command(h, c);
28885+ h->access->submit_command(h, c);
28886
28887 /* Put job onto the completed Q */
28888 addQ(&h->cmpQ, c);
28889@@ -3443,17 +3445,17 @@ startio:
28890
28891 static inline unsigned long get_next_completion(ctlr_info_t *h)
28892 {
28893- return h->access.command_completed(h);
28894+ return h->access->command_completed(h);
28895 }
28896
28897 static inline int interrupt_pending(ctlr_info_t *h)
28898 {
28899- return h->access.intr_pending(h);
28900+ return h->access->intr_pending(h);
28901 }
28902
28903 static inline long interrupt_not_for_us(ctlr_info_t *h)
28904 {
28905- return ((h->access.intr_pending(h) == 0) ||
28906+ return ((h->access->intr_pending(h) == 0) ||
28907 (h->interrupts_enabled == 0));
28908 }
28909
28910@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28911 u32 a;
28912
28913 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28914- return h->access.command_completed(h);
28915+ return h->access->command_completed(h);
28916
28917 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28918 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28919@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28920 trans_support & CFGTBL_Trans_use_short_tags);
28921
28922 /* Change the access methods to the performant access methods */
28923- h->access = SA5_performant_access;
28924+ h->access = &SA5_performant_access;
28925 h->transMethod = CFGTBL_Trans_Performant;
28926
28927 return;
28928@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28929 if (prod_index < 0)
28930 return -ENODEV;
28931 h->product_name = products[prod_index].product_name;
28932- h->access = *(products[prod_index].access);
28933+ h->access = products[prod_index].access;
28934
28935 if (cciss_board_disabled(h)) {
28936 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28937@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28938 }
28939
28940 /* make sure the board interrupts are off */
28941- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28942+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28943 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28944 if (rc)
28945 goto clean2;
28946@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28947 * fake ones to scoop up any residual completions.
28948 */
28949 spin_lock_irqsave(&h->lock, flags);
28950- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28951+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28952 spin_unlock_irqrestore(&h->lock, flags);
28953 free_irq(h->intr[h->intr_mode], h);
28954 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28955@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28956 dev_info(&h->pdev->dev, "Board READY.\n");
28957 dev_info(&h->pdev->dev,
28958 "Waiting for stale completions to drain.\n");
28959- h->access.set_intr_mask(h, CCISS_INTR_ON);
28960+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28961 msleep(10000);
28962- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28963+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28964
28965 rc = controller_reset_failed(h->cfgtable);
28966 if (rc)
28967@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28968 cciss_scsi_setup(h);
28969
28970 /* Turn the interrupts on so we can service requests */
28971- h->access.set_intr_mask(h, CCISS_INTR_ON);
28972+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28973
28974 /* Get the firmware version */
28975 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28976@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28977 kfree(flush_buf);
28978 if (return_code != IO_OK)
28979 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28980- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28981+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28982 free_irq(h->intr[h->intr_mode], h);
28983 }
28984
28985diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28986index 7fda30e..eb5dfe0 100644
28987--- a/drivers/block/cciss.h
28988+++ b/drivers/block/cciss.h
28989@@ -101,7 +101,7 @@ struct ctlr_info
28990 /* information about each logical volume */
28991 drive_info_struct *drv[CISS_MAX_LUN];
28992
28993- struct access_method access;
28994+ struct access_method *access;
28995
28996 /* queue and queue Info */
28997 struct list_head reqQ;
28998diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28999index 9125bbe..eede5c8 100644
29000--- a/drivers/block/cpqarray.c
29001+++ b/drivers/block/cpqarray.c
29002@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29003 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29004 goto Enomem4;
29005 }
29006- hba[i]->access.set_intr_mask(hba[i], 0);
29007+ hba[i]->access->set_intr_mask(hba[i], 0);
29008 if (request_irq(hba[i]->intr, do_ida_intr,
29009 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29010 {
29011@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29012 add_timer(&hba[i]->timer);
29013
29014 /* Enable IRQ now that spinlock and rate limit timer are set up */
29015- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29016+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29017
29018 for(j=0; j<NWD; j++) {
29019 struct gendisk *disk = ida_gendisk[i][j];
29020@@ -694,7 +694,7 @@ DBGINFO(
29021 for(i=0; i<NR_PRODUCTS; i++) {
29022 if (board_id == products[i].board_id) {
29023 c->product_name = products[i].product_name;
29024- c->access = *(products[i].access);
29025+ c->access = products[i].access;
29026 break;
29027 }
29028 }
29029@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29030 hba[ctlr]->intr = intr;
29031 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29032 hba[ctlr]->product_name = products[j].product_name;
29033- hba[ctlr]->access = *(products[j].access);
29034+ hba[ctlr]->access = products[j].access;
29035 hba[ctlr]->ctlr = ctlr;
29036 hba[ctlr]->board_id = board_id;
29037 hba[ctlr]->pci_dev = NULL; /* not PCI */
29038@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29039
29040 while((c = h->reqQ) != NULL) {
29041 /* Can't do anything if we're busy */
29042- if (h->access.fifo_full(h) == 0)
29043+ if (h->access->fifo_full(h) == 0)
29044 return;
29045
29046 /* Get the first entry from the request Q */
29047@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29048 h->Qdepth--;
29049
29050 /* Tell the controller to do our bidding */
29051- h->access.submit_command(h, c);
29052+ h->access->submit_command(h, c);
29053
29054 /* Get onto the completion Q */
29055 addQ(&h->cmpQ, c);
29056@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29057 unsigned long flags;
29058 __u32 a,a1;
29059
29060- istat = h->access.intr_pending(h);
29061+ istat = h->access->intr_pending(h);
29062 /* Is this interrupt for us? */
29063 if (istat == 0)
29064 return IRQ_NONE;
29065@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29066 */
29067 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29068 if (istat & FIFO_NOT_EMPTY) {
29069- while((a = h->access.command_completed(h))) {
29070+ while((a = h->access->command_completed(h))) {
29071 a1 = a; a &= ~3;
29072 if ((c = h->cmpQ) == NULL)
29073 {
29074@@ -1449,11 +1449,11 @@ static int sendcmd(
29075 /*
29076 * Disable interrupt
29077 */
29078- info_p->access.set_intr_mask(info_p, 0);
29079+ info_p->access->set_intr_mask(info_p, 0);
29080 /* Make sure there is room in the command FIFO */
29081 /* Actually it should be completely empty at this time. */
29082 for (i = 200000; i > 0; i--) {
29083- temp = info_p->access.fifo_full(info_p);
29084+ temp = info_p->access->fifo_full(info_p);
29085 if (temp != 0) {
29086 break;
29087 }
29088@@ -1466,7 +1466,7 @@ DBG(
29089 /*
29090 * Send the cmd
29091 */
29092- info_p->access.submit_command(info_p, c);
29093+ info_p->access->submit_command(info_p, c);
29094 complete = pollcomplete(ctlr);
29095
29096 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29097@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29098 * we check the new geometry. Then turn interrupts back on when
29099 * we're done.
29100 */
29101- host->access.set_intr_mask(host, 0);
29102+ host->access->set_intr_mask(host, 0);
29103 getgeometry(ctlr);
29104- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29105+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29106
29107 for(i=0; i<NWD; i++) {
29108 struct gendisk *disk = ida_gendisk[ctlr][i];
29109@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29110 /* Wait (up to 2 seconds) for a command to complete */
29111
29112 for (i = 200000; i > 0; i--) {
29113- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29114+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29115 if (done == 0) {
29116 udelay(10); /* a short fixed delay */
29117 } else
29118diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29119index be73e9d..7fbf140 100644
29120--- a/drivers/block/cpqarray.h
29121+++ b/drivers/block/cpqarray.h
29122@@ -99,7 +99,7 @@ struct ctlr_info {
29123 drv_info_t drv[NWD];
29124 struct proc_dir_entry *proc;
29125
29126- struct access_method access;
29127+ struct access_method *access;
29128
29129 cmdlist_t *reqQ;
29130 cmdlist_t *cmpQ;
29131diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29132index 8d68056..e67050f 100644
29133--- a/drivers/block/drbd/drbd_int.h
29134+++ b/drivers/block/drbd/drbd_int.h
29135@@ -736,7 +736,7 @@ struct drbd_request;
29136 struct drbd_epoch {
29137 struct list_head list;
29138 unsigned int barrier_nr;
29139- atomic_t epoch_size; /* increased on every request added. */
29140+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29141 atomic_t active; /* increased on every req. added, and dec on every finished. */
29142 unsigned long flags;
29143 };
29144@@ -1108,7 +1108,7 @@ struct drbd_conf {
29145 void *int_dig_in;
29146 void *int_dig_vv;
29147 wait_queue_head_t seq_wait;
29148- atomic_t packet_seq;
29149+ atomic_unchecked_t packet_seq;
29150 unsigned int peer_seq;
29151 spinlock_t peer_seq_lock;
29152 unsigned int minor;
29153@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29154
29155 static inline void drbd_tcp_cork(struct socket *sock)
29156 {
29157- int __user val = 1;
29158+ int val = 1;
29159 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29160- (char __user *)&val, sizeof(val));
29161+ (char __force_user *)&val, sizeof(val));
29162 }
29163
29164 static inline void drbd_tcp_uncork(struct socket *sock)
29165 {
29166- int __user val = 0;
29167+ int val = 0;
29168 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29169- (char __user *)&val, sizeof(val));
29170+ (char __force_user *)&val, sizeof(val));
29171 }
29172
29173 static inline void drbd_tcp_nodelay(struct socket *sock)
29174 {
29175- int __user val = 1;
29176+ int val = 1;
29177 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29178- (char __user *)&val, sizeof(val));
29179+ (char __force_user *)&val, sizeof(val));
29180 }
29181
29182 static inline void drbd_tcp_quickack(struct socket *sock)
29183 {
29184- int __user val = 2;
29185+ int val = 2;
29186 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29187- (char __user *)&val, sizeof(val));
29188+ (char __force_user *)&val, sizeof(val));
29189 }
29190
29191 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29192diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29193index 211fc44..c5116f1 100644
29194--- a/drivers/block/drbd/drbd_main.c
29195+++ b/drivers/block/drbd/drbd_main.c
29196@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29197 p.sector = sector;
29198 p.block_id = block_id;
29199 p.blksize = blksize;
29200- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29201+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29202
29203 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29204 return false;
29205@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29206 p.sector = cpu_to_be64(req->sector);
29207 p.block_id = (unsigned long)req;
29208 p.seq_num = cpu_to_be32(req->seq_num =
29209- atomic_add_return(1, &mdev->packet_seq));
29210+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29211
29212 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29213
29214@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29215 atomic_set(&mdev->unacked_cnt, 0);
29216 atomic_set(&mdev->local_cnt, 0);
29217 atomic_set(&mdev->net_cnt, 0);
29218- atomic_set(&mdev->packet_seq, 0);
29219+ atomic_set_unchecked(&mdev->packet_seq, 0);
29220 atomic_set(&mdev->pp_in_use, 0);
29221 atomic_set(&mdev->pp_in_use_by_net, 0);
29222 atomic_set(&mdev->rs_sect_in, 0);
29223@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29224 mdev->receiver.t_state);
29225
29226 /* no need to lock it, I'm the only thread alive */
29227- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29228- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29229+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29230+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29231 mdev->al_writ_cnt =
29232 mdev->bm_writ_cnt =
29233 mdev->read_cnt =
29234diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29235index 946166e..356b39a 100644
29236--- a/drivers/block/drbd/drbd_nl.c
29237+++ b/drivers/block/drbd/drbd_nl.c
29238@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29239 module_put(THIS_MODULE);
29240 }
29241
29242-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29243+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29244
29245 static unsigned short *
29246 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29247@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29248 cn_reply->id.idx = CN_IDX_DRBD;
29249 cn_reply->id.val = CN_VAL_DRBD;
29250
29251- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29252+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29253 cn_reply->ack = 0; /* not used here. */
29254 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29255 (int)((char *)tl - (char *)reply->tag_list);
29256@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29257 cn_reply->id.idx = CN_IDX_DRBD;
29258 cn_reply->id.val = CN_VAL_DRBD;
29259
29260- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29261+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29262 cn_reply->ack = 0; /* not used here. */
29263 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29264 (int)((char *)tl - (char *)reply->tag_list);
29265@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29266 cn_reply->id.idx = CN_IDX_DRBD;
29267 cn_reply->id.val = CN_VAL_DRBD;
29268
29269- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29270+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29271 cn_reply->ack = 0; // not used here.
29272 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29273 (int)((char*)tl - (char*)reply->tag_list);
29274@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29275 cn_reply->id.idx = CN_IDX_DRBD;
29276 cn_reply->id.val = CN_VAL_DRBD;
29277
29278- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29279+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29280 cn_reply->ack = 0; /* not used here. */
29281 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29282 (int)((char *)tl - (char *)reply->tag_list);
29283diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29284index 43beaca..4a5b1dd 100644
29285--- a/drivers/block/drbd/drbd_receiver.c
29286+++ b/drivers/block/drbd/drbd_receiver.c
29287@@ -894,7 +894,7 @@ retry:
29288 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29289 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29290
29291- atomic_set(&mdev->packet_seq, 0);
29292+ atomic_set_unchecked(&mdev->packet_seq, 0);
29293 mdev->peer_seq = 0;
29294
29295 drbd_thread_start(&mdev->asender);
29296@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29297 do {
29298 next_epoch = NULL;
29299
29300- epoch_size = atomic_read(&epoch->epoch_size);
29301+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29302
29303 switch (ev & ~EV_CLEANUP) {
29304 case EV_PUT:
29305@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29306 rv = FE_DESTROYED;
29307 } else {
29308 epoch->flags = 0;
29309- atomic_set(&epoch->epoch_size, 0);
29310+ atomic_set_unchecked(&epoch->epoch_size, 0);
29311 /* atomic_set(&epoch->active, 0); is already zero */
29312 if (rv == FE_STILL_LIVE)
29313 rv = FE_RECYCLED;
29314@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29315 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29316 drbd_flush(mdev);
29317
29318- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29319+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29320 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29321 if (epoch)
29322 break;
29323 }
29324
29325 epoch = mdev->current_epoch;
29326- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29327+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29328
29329 D_ASSERT(atomic_read(&epoch->active) == 0);
29330 D_ASSERT(epoch->flags == 0);
29331@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29332 }
29333
29334 epoch->flags = 0;
29335- atomic_set(&epoch->epoch_size, 0);
29336+ atomic_set_unchecked(&epoch->epoch_size, 0);
29337 atomic_set(&epoch->active, 0);
29338
29339 spin_lock(&mdev->epoch_lock);
29340- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29341+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29342 list_add(&epoch->list, &mdev->current_epoch->list);
29343 mdev->current_epoch = epoch;
29344 mdev->epochs++;
29345@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29346 spin_unlock(&mdev->peer_seq_lock);
29347
29348 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29349- atomic_inc(&mdev->current_epoch->epoch_size);
29350+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29351 return drbd_drain_block(mdev, data_size);
29352 }
29353
29354@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29355
29356 spin_lock(&mdev->epoch_lock);
29357 e->epoch = mdev->current_epoch;
29358- atomic_inc(&e->epoch->epoch_size);
29359+ atomic_inc_unchecked(&e->epoch->epoch_size);
29360 atomic_inc(&e->epoch->active);
29361 spin_unlock(&mdev->epoch_lock);
29362
29363@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29364 D_ASSERT(list_empty(&mdev->done_ee));
29365
29366 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29367- atomic_set(&mdev->current_epoch->epoch_size, 0);
29368+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29369 D_ASSERT(list_empty(&mdev->current_epoch->list));
29370 }
29371
29372diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29373index bbca966..65e37dd 100644
29374--- a/drivers/block/loop.c
29375+++ b/drivers/block/loop.c
29376@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29377 mm_segment_t old_fs = get_fs();
29378
29379 set_fs(get_ds());
29380- bw = file->f_op->write(file, buf, len, &pos);
29381+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29382 set_fs(old_fs);
29383 if (likely(bw == len))
29384 return 0;
29385diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29386index ee94686..3e09ad3 100644
29387--- a/drivers/char/Kconfig
29388+++ b/drivers/char/Kconfig
29389@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29390
29391 config DEVKMEM
29392 bool "/dev/kmem virtual device support"
29393- default y
29394+ default n
29395+ depends on !GRKERNSEC_KMEM
29396 help
29397 Say Y here if you want to support the /dev/kmem device. The
29398 /dev/kmem device is rarely used, but can be used for certain
29399@@ -581,6 +582,7 @@ config DEVPORT
29400 bool
29401 depends on !M68K
29402 depends on ISA || PCI
29403+ depends on !GRKERNSEC_KMEM
29404 default y
29405
29406 source "drivers/s390/char/Kconfig"
29407diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29408index 2e04433..22afc64 100644
29409--- a/drivers/char/agp/frontend.c
29410+++ b/drivers/char/agp/frontend.c
29411@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29412 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29413 return -EFAULT;
29414
29415- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29416+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29417 return -EFAULT;
29418
29419 client = agp_find_client_by_pid(reserve.pid);
29420diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29421index 21cb980..f15107c 100644
29422--- a/drivers/char/genrtc.c
29423+++ b/drivers/char/genrtc.c
29424@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29425 switch (cmd) {
29426
29427 case RTC_PLL_GET:
29428+ memset(&pll, 0, sizeof(pll));
29429 if (get_rtc_pll(&pll))
29430 return -EINVAL;
29431 else
29432diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29433index dfd7876..c0b0885 100644
29434--- a/drivers/char/hpet.c
29435+++ b/drivers/char/hpet.c
29436@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29437 }
29438
29439 static int
29440-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29441+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29442 struct hpet_info *info)
29443 {
29444 struct hpet_timer __iomem *timer;
29445diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29446index 2c29942..604c5ba 100644
29447--- a/drivers/char/ipmi/ipmi_msghandler.c
29448+++ b/drivers/char/ipmi/ipmi_msghandler.c
29449@@ -420,7 +420,7 @@ struct ipmi_smi {
29450 struct proc_dir_entry *proc_dir;
29451 char proc_dir_name[10];
29452
29453- atomic_t stats[IPMI_NUM_STATS];
29454+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29455
29456 /*
29457 * run_to_completion duplicate of smb_info, smi_info
29458@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29459
29460
29461 #define ipmi_inc_stat(intf, stat) \
29462- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29463+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29464 #define ipmi_get_stat(intf, stat) \
29465- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29466+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29467
29468 static int is_lan_addr(struct ipmi_addr *addr)
29469 {
29470@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29471 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29472 init_waitqueue_head(&intf->waitq);
29473 for (i = 0; i < IPMI_NUM_STATS; i++)
29474- atomic_set(&intf->stats[i], 0);
29475+ atomic_set_unchecked(&intf->stats[i], 0);
29476
29477 intf->proc_dir = NULL;
29478
29479diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29480index 1e638ff..a869ef5 100644
29481--- a/drivers/char/ipmi/ipmi_si_intf.c
29482+++ b/drivers/char/ipmi/ipmi_si_intf.c
29483@@ -275,7 +275,7 @@ struct smi_info {
29484 unsigned char slave_addr;
29485
29486 /* Counters and things for the proc filesystem. */
29487- atomic_t stats[SI_NUM_STATS];
29488+ atomic_unchecked_t stats[SI_NUM_STATS];
29489
29490 struct task_struct *thread;
29491
29492@@ -284,9 +284,9 @@ struct smi_info {
29493 };
29494
29495 #define smi_inc_stat(smi, stat) \
29496- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29497+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29498 #define smi_get_stat(smi, stat) \
29499- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29500+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29501
29502 #define SI_MAX_PARMS 4
29503
29504@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29505 atomic_set(&new_smi->req_events, 0);
29506 new_smi->run_to_completion = 0;
29507 for (i = 0; i < SI_NUM_STATS; i++)
29508- atomic_set(&new_smi->stats[i], 0);
29509+ atomic_set_unchecked(&new_smi->stats[i], 0);
29510
29511 new_smi->interrupt_disabled = 1;
29512 atomic_set(&new_smi->stop_operation, 0);
29513diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29514index 47ff7e4..0c7d340 100644
29515--- a/drivers/char/mbcs.c
29516+++ b/drivers/char/mbcs.c
29517@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29518 return 0;
29519 }
29520
29521-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29522+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29523 {
29524 .part_num = MBCS_PART_NUM,
29525 .mfg_num = MBCS_MFG_NUM,
29526diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29527index d6e9d08..0c314bf 100644
29528--- a/drivers/char/mem.c
29529+++ b/drivers/char/mem.c
29530@@ -18,6 +18,7 @@
29531 #include <linux/raw.h>
29532 #include <linux/tty.h>
29533 #include <linux/capability.h>
29534+#include <linux/security.h>
29535 #include <linux/ptrace.h>
29536 #include <linux/device.h>
29537 #include <linux/highmem.h>
29538@@ -35,6 +36,10 @@
29539 # include <linux/efi.h>
29540 #endif
29541
29542+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29543+extern const struct file_operations grsec_fops;
29544+#endif
29545+
29546 static inline unsigned long size_inside_page(unsigned long start,
29547 unsigned long size)
29548 {
29549@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29550
29551 while (cursor < to) {
29552 if (!devmem_is_allowed(pfn)) {
29553+#ifdef CONFIG_GRKERNSEC_KMEM
29554+ gr_handle_mem_readwrite(from, to);
29555+#else
29556 printk(KERN_INFO
29557 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29558 current->comm, from, to);
29559+#endif
29560 return 0;
29561 }
29562 cursor += PAGE_SIZE;
29563@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29564 }
29565 return 1;
29566 }
29567+#elif defined(CONFIG_GRKERNSEC_KMEM)
29568+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29569+{
29570+ return 0;
29571+}
29572 #else
29573 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29574 {
29575@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29576
29577 while (count > 0) {
29578 unsigned long remaining;
29579+ char *temp;
29580
29581 sz = size_inside_page(p, count);
29582
29583@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29584 if (!ptr)
29585 return -EFAULT;
29586
29587- remaining = copy_to_user(buf, ptr, sz);
29588+#ifdef CONFIG_PAX_USERCOPY
29589+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29590+ if (!temp) {
29591+ unxlate_dev_mem_ptr(p, ptr);
29592+ return -ENOMEM;
29593+ }
29594+ memcpy(temp, ptr, sz);
29595+#else
29596+ temp = ptr;
29597+#endif
29598+
29599+ remaining = copy_to_user(buf, temp, sz);
29600+
29601+#ifdef CONFIG_PAX_USERCOPY
29602+ kfree(temp);
29603+#endif
29604+
29605 unxlate_dev_mem_ptr(p, ptr);
29606 if (remaining)
29607 return -EFAULT;
29608@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29609 size_t count, loff_t *ppos)
29610 {
29611 unsigned long p = *ppos;
29612- ssize_t low_count, read, sz;
29613+ ssize_t low_count, read, sz, err = 0;
29614 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29615- int err = 0;
29616
29617 read = 0;
29618 if (p < (unsigned long) high_memory) {
29619@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29620 }
29621 #endif
29622 while (low_count > 0) {
29623+ char *temp;
29624+
29625 sz = size_inside_page(p, low_count);
29626
29627 /*
29628@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29629 */
29630 kbuf = xlate_dev_kmem_ptr((char *)p);
29631
29632- if (copy_to_user(buf, kbuf, sz))
29633+#ifdef CONFIG_PAX_USERCOPY
29634+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29635+ if (!temp)
29636+ return -ENOMEM;
29637+ memcpy(temp, kbuf, sz);
29638+#else
29639+ temp = kbuf;
29640+#endif
29641+
29642+ err = copy_to_user(buf, temp, sz);
29643+
29644+#ifdef CONFIG_PAX_USERCOPY
29645+ kfree(temp);
29646+#endif
29647+
29648+ if (err)
29649 return -EFAULT;
29650 buf += sz;
29651 p += sz;
29652@@ -867,6 +914,9 @@ static const struct memdev {
29653 #ifdef CONFIG_CRASH_DUMP
29654 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29655 #endif
29656+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29657+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29658+#endif
29659 };
29660
29661 static int memory_open(struct inode *inode, struct file *filp)
29662diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29663index 9df78e2..01ba9ae 100644
29664--- a/drivers/char/nvram.c
29665+++ b/drivers/char/nvram.c
29666@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29667
29668 spin_unlock_irq(&rtc_lock);
29669
29670- if (copy_to_user(buf, contents, tmp - contents))
29671+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29672 return -EFAULT;
29673
29674 *ppos = i;
29675diff --git a/drivers/char/random.c b/drivers/char/random.c
29676index 4ec04a7..9918387 100644
29677--- a/drivers/char/random.c
29678+++ b/drivers/char/random.c
29679@@ -261,8 +261,13 @@
29680 /*
29681 * Configuration information
29682 */
29683+#ifdef CONFIG_GRKERNSEC_RANDNET
29684+#define INPUT_POOL_WORDS 512
29685+#define OUTPUT_POOL_WORDS 128
29686+#else
29687 #define INPUT_POOL_WORDS 128
29688 #define OUTPUT_POOL_WORDS 32
29689+#endif
29690 #define SEC_XFER_SIZE 512
29691 #define EXTRACT_SIZE 10
29692
29693@@ -300,10 +305,17 @@ static struct poolinfo {
29694 int poolwords;
29695 int tap1, tap2, tap3, tap4, tap5;
29696 } poolinfo_table[] = {
29697+#ifdef CONFIG_GRKERNSEC_RANDNET
29698+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29699+ { 512, 411, 308, 208, 104, 1 },
29700+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29701+ { 128, 103, 76, 51, 25, 1 },
29702+#else
29703 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29704 { 128, 103, 76, 51, 25, 1 },
29705 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29706 { 32, 26, 20, 14, 7, 1 },
29707+#endif
29708 #if 0
29709 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29710 { 2048, 1638, 1231, 819, 411, 1 },
29711@@ -726,6 +738,17 @@ void add_disk_randomness(struct gendisk *disk)
29712 }
29713 #endif
29714
29715+#ifdef CONFIG_PAX_LATENT_ENTROPY
29716+u64 latent_entropy;
29717+
29718+__init void transfer_latent_entropy(void)
29719+{
29720+ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
29721+ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
29722+// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29723+}
29724+#endif
29725+
29726 /*********************************************************************
29727 *
29728 * Entropy extraction routines
29729@@ -913,7 +936,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29730
29731 extract_buf(r, tmp);
29732 i = min_t(int, nbytes, EXTRACT_SIZE);
29733- if (copy_to_user(buf, tmp, i)) {
29734+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29735 ret = -EFAULT;
29736 break;
29737 }
29738@@ -1238,7 +1261,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29739 #include <linux/sysctl.h>
29740
29741 static int min_read_thresh = 8, min_write_thresh;
29742-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29743+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29744 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29745 static char sysctl_bootid[16];
29746
29747diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29748index 45713f0..8286d21 100644
29749--- a/drivers/char/sonypi.c
29750+++ b/drivers/char/sonypi.c
29751@@ -54,6 +54,7 @@
29752
29753 #include <asm/uaccess.h>
29754 #include <asm/io.h>
29755+#include <asm/local.h>
29756
29757 #include <linux/sonypi.h>
29758
29759@@ -490,7 +491,7 @@ static struct sonypi_device {
29760 spinlock_t fifo_lock;
29761 wait_queue_head_t fifo_proc_list;
29762 struct fasync_struct *fifo_async;
29763- int open_count;
29764+ local_t open_count;
29765 int model;
29766 struct input_dev *input_jog_dev;
29767 struct input_dev *input_key_dev;
29768@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29769 static int sonypi_misc_release(struct inode *inode, struct file *file)
29770 {
29771 mutex_lock(&sonypi_device.lock);
29772- sonypi_device.open_count--;
29773+ local_dec(&sonypi_device.open_count);
29774 mutex_unlock(&sonypi_device.lock);
29775 return 0;
29776 }
29777@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29778 {
29779 mutex_lock(&sonypi_device.lock);
29780 /* Flush input queue on first open */
29781- if (!sonypi_device.open_count)
29782+ if (!local_read(&sonypi_device.open_count))
29783 kfifo_reset(&sonypi_device.fifo);
29784- sonypi_device.open_count++;
29785+ local_inc(&sonypi_device.open_count);
29786 mutex_unlock(&sonypi_device.lock);
29787
29788 return 0;
29789diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29790index ad7c732..5aa8054 100644
29791--- a/drivers/char/tpm/tpm.c
29792+++ b/drivers/char/tpm/tpm.c
29793@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29794 chip->vendor.req_complete_val)
29795 goto out_recv;
29796
29797- if ((status == chip->vendor.req_canceled)) {
29798+ if (status == chip->vendor.req_canceled) {
29799 dev_err(chip->dev, "Operation Canceled\n");
29800 rc = -ECANCELED;
29801 goto out;
29802diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29803index 0636520..169c1d0 100644
29804--- a/drivers/char/tpm/tpm_bios.c
29805+++ b/drivers/char/tpm/tpm_bios.c
29806@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29807 event = addr;
29808
29809 if ((event->event_type == 0 && event->event_size == 0) ||
29810- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29811+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29812 return NULL;
29813
29814 return addr;
29815@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29816 return NULL;
29817
29818 if ((event->event_type == 0 && event->event_size == 0) ||
29819- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29820+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29821 return NULL;
29822
29823 (*pos)++;
29824@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29825 int i;
29826
29827 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29828- seq_putc(m, data[i]);
29829+ if (!seq_putc(m, data[i]))
29830+ return -EFAULT;
29831
29832 return 0;
29833 }
29834@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29835 log->bios_event_log_end = log->bios_event_log + len;
29836
29837 virt = acpi_os_map_memory(start, len);
29838+ if (!virt) {
29839+ kfree(log->bios_event_log);
29840+ log->bios_event_log = NULL;
29841+ return -EFAULT;
29842+ }
29843
29844- memcpy(log->bios_event_log, virt, len);
29845+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29846
29847 acpi_os_unmap_memory(virt, len);
29848 return 0;
29849diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29850index cdf2f54..e55c197 100644
29851--- a/drivers/char/virtio_console.c
29852+++ b/drivers/char/virtio_console.c
29853@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29854 if (to_user) {
29855 ssize_t ret;
29856
29857- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29858+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29859 if (ret)
29860 return -EFAULT;
29861 } else {
29862@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29863 if (!port_has_data(port) && !port->host_connected)
29864 return 0;
29865
29866- return fill_readbuf(port, ubuf, count, true);
29867+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29868 }
29869
29870 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29871diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29872index 97f5064..202b6e6 100644
29873--- a/drivers/edac/edac_pci_sysfs.c
29874+++ b/drivers/edac/edac_pci_sysfs.c
29875@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29876 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29877 static int edac_pci_poll_msec = 1000; /* one second workq period */
29878
29879-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29880-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29881+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29882+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29883
29884 static struct kobject *edac_pci_top_main_kobj;
29885 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29886@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29887 edac_printk(KERN_CRIT, EDAC_PCI,
29888 "Signaled System Error on %s\n",
29889 pci_name(dev));
29890- atomic_inc(&pci_nonparity_count);
29891+ atomic_inc_unchecked(&pci_nonparity_count);
29892 }
29893
29894 if (status & (PCI_STATUS_PARITY)) {
29895@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29896 "Master Data Parity Error on %s\n",
29897 pci_name(dev));
29898
29899- atomic_inc(&pci_parity_count);
29900+ atomic_inc_unchecked(&pci_parity_count);
29901 }
29902
29903 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29904@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29905 "Detected Parity Error on %s\n",
29906 pci_name(dev));
29907
29908- atomic_inc(&pci_parity_count);
29909+ atomic_inc_unchecked(&pci_parity_count);
29910 }
29911 }
29912
29913@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29914 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29915 "Signaled System Error on %s\n",
29916 pci_name(dev));
29917- atomic_inc(&pci_nonparity_count);
29918+ atomic_inc_unchecked(&pci_nonparity_count);
29919 }
29920
29921 if (status & (PCI_STATUS_PARITY)) {
29922@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29923 "Master Data Parity Error on "
29924 "%s\n", pci_name(dev));
29925
29926- atomic_inc(&pci_parity_count);
29927+ atomic_inc_unchecked(&pci_parity_count);
29928 }
29929
29930 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29931@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29932 "Detected Parity Error on %s\n",
29933 pci_name(dev));
29934
29935- atomic_inc(&pci_parity_count);
29936+ atomic_inc_unchecked(&pci_parity_count);
29937 }
29938 }
29939 }
29940@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29941 if (!check_pci_errors)
29942 return;
29943
29944- before_count = atomic_read(&pci_parity_count);
29945+ before_count = atomic_read_unchecked(&pci_parity_count);
29946
29947 /* scan all PCI devices looking for a Parity Error on devices and
29948 * bridges.
29949@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29950 /* Only if operator has selected panic on PCI Error */
29951 if (edac_pci_get_panic_on_pe()) {
29952 /* If the count is different 'after' from 'before' */
29953- if (before_count != atomic_read(&pci_parity_count))
29954+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29955 panic("EDAC: PCI Parity Error");
29956 }
29957 }
29958diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29959index c6074c5..88a9e2e 100644
29960--- a/drivers/edac/mce_amd.h
29961+++ b/drivers/edac/mce_amd.h
29962@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29963 struct amd_decoder_ops {
29964 bool (*dc_mce)(u16, u8);
29965 bool (*ic_mce)(u16, u8);
29966-};
29967+} __no_const;
29968
29969 void amd_report_gart_errors(bool);
29970 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29971diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29972index cc595eb..4ec702a 100644
29973--- a/drivers/firewire/core-card.c
29974+++ b/drivers/firewire/core-card.c
29975@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29976
29977 void fw_core_remove_card(struct fw_card *card)
29978 {
29979- struct fw_card_driver dummy_driver = dummy_driver_template;
29980+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29981
29982 card->driver->update_phy_reg(card, 4,
29983 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29984diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29985index 2e6b245..c3857d9 100644
29986--- a/drivers/firewire/core-cdev.c
29987+++ b/drivers/firewire/core-cdev.c
29988@@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29989 int ret;
29990
29991 if ((request->channels == 0 && request->bandwidth == 0) ||
29992- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29993- request->bandwidth < 0)
29994+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29995 return -EINVAL;
29996
29997 r = kmalloc(sizeof(*r), GFP_KERNEL);
29998diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29999index dea2dcc..a4fb978 100644
30000--- a/drivers/firewire/core-transaction.c
30001+++ b/drivers/firewire/core-transaction.c
30002@@ -37,6 +37,7 @@
30003 #include <linux/timer.h>
30004 #include <linux/types.h>
30005 #include <linux/workqueue.h>
30006+#include <linux/sched.h>
30007
30008 #include <asm/byteorder.h>
30009
30010diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30011index 9047f55..e47c7ff 100644
30012--- a/drivers/firewire/core.h
30013+++ b/drivers/firewire/core.h
30014@@ -110,6 +110,7 @@ struct fw_card_driver {
30015
30016 int (*stop_iso)(struct fw_iso_context *ctx);
30017 };
30018+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30019
30020 void fw_card_initialize(struct fw_card *card,
30021 const struct fw_card_driver *driver, struct device *device);
30022diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30023index 153980b..4b4d046 100644
30024--- a/drivers/firmware/dmi_scan.c
30025+++ b/drivers/firmware/dmi_scan.c
30026@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30027 }
30028 }
30029 else {
30030- /*
30031- * no iounmap() for that ioremap(); it would be a no-op, but
30032- * it's so early in setup that sucker gets confused into doing
30033- * what it shouldn't if we actually call it.
30034- */
30035 p = dmi_ioremap(0xF0000, 0x10000);
30036 if (p == NULL)
30037 goto error;
30038@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30039 if (buf == NULL)
30040 return -1;
30041
30042- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30043+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30044
30045 iounmap(buf);
30046 return 0;
30047diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30048index 82d5c20..44a7177 100644
30049--- a/drivers/gpio/gpio-vr41xx.c
30050+++ b/drivers/gpio/gpio-vr41xx.c
30051@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30052 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30053 maskl, pendl, maskh, pendh);
30054
30055- atomic_inc(&irq_err_count);
30056+ atomic_inc_unchecked(&irq_err_count);
30057
30058 return -EINVAL;
30059 }
30060diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30061index 8111889..367b253 100644
30062--- a/drivers/gpu/drm/drm_crtc_helper.c
30063+++ b/drivers/gpu/drm/drm_crtc_helper.c
30064@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30065 struct drm_crtc *tmp;
30066 int crtc_mask = 1;
30067
30068- WARN(!crtc, "checking null crtc?\n");
30069+ BUG_ON(!crtc);
30070
30071 dev = crtc->dev;
30072
30073diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30074index 6116e3b..c29dd16 100644
30075--- a/drivers/gpu/drm/drm_drv.c
30076+++ b/drivers/gpu/drm/drm_drv.c
30077@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
30078 /**
30079 * Copy and IOCTL return string to user space
30080 */
30081-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30082+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30083 {
30084 int len;
30085
30086@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
30087 return -ENODEV;
30088
30089 atomic_inc(&dev->ioctl_count);
30090- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30091+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30092 ++file_priv->ioctl_count;
30093
30094 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30095diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30096index 123de28..43a0897 100644
30097--- a/drivers/gpu/drm/drm_fops.c
30098+++ b/drivers/gpu/drm/drm_fops.c
30099@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30100 }
30101
30102 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30103- atomic_set(&dev->counts[i], 0);
30104+ atomic_set_unchecked(&dev->counts[i], 0);
30105
30106 dev->sigdata.lock = NULL;
30107
30108@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30109
30110 retcode = drm_open_helper(inode, filp, dev);
30111 if (!retcode) {
30112- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30113- if (!dev->open_count++)
30114+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30115+ if (local_inc_return(&dev->open_count) == 1)
30116 retcode = drm_setup(dev);
30117 }
30118 if (!retcode) {
30119@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30120
30121 mutex_lock(&drm_global_mutex);
30122
30123- DRM_DEBUG("open_count = %d\n", dev->open_count);
30124+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30125
30126 if (dev->driver->preclose)
30127 dev->driver->preclose(dev, file_priv);
30128@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30129 * Begin inline drm_release
30130 */
30131
30132- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30133+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30134 task_pid_nr(current),
30135 (long)old_encode_dev(file_priv->minor->device),
30136- dev->open_count);
30137+ local_read(&dev->open_count));
30138
30139 /* Release any auth tokens that might point to this file_priv,
30140 (do that under the drm_global_mutex) */
30141@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30142 * End inline drm_release
30143 */
30144
30145- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30146- if (!--dev->open_count) {
30147+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30148+ if (local_dec_and_test(&dev->open_count)) {
30149 if (atomic_read(&dev->ioctl_count)) {
30150 DRM_ERROR("Device busy: %d\n",
30151 atomic_read(&dev->ioctl_count));
30152diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30153index c87dc96..326055d 100644
30154--- a/drivers/gpu/drm/drm_global.c
30155+++ b/drivers/gpu/drm/drm_global.c
30156@@ -36,7 +36,7 @@
30157 struct drm_global_item {
30158 struct mutex mutex;
30159 void *object;
30160- int refcount;
30161+ atomic_t refcount;
30162 };
30163
30164 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30165@@ -49,7 +49,7 @@ void drm_global_init(void)
30166 struct drm_global_item *item = &glob[i];
30167 mutex_init(&item->mutex);
30168 item->object = NULL;
30169- item->refcount = 0;
30170+ atomic_set(&item->refcount, 0);
30171 }
30172 }
30173
30174@@ -59,7 +59,7 @@ void drm_global_release(void)
30175 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30176 struct drm_global_item *item = &glob[i];
30177 BUG_ON(item->object != NULL);
30178- BUG_ON(item->refcount != 0);
30179+ BUG_ON(atomic_read(&item->refcount) != 0);
30180 }
30181 }
30182
30183@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30184 void *object;
30185
30186 mutex_lock(&item->mutex);
30187- if (item->refcount == 0) {
30188+ if (atomic_read(&item->refcount) == 0) {
30189 item->object = kzalloc(ref->size, GFP_KERNEL);
30190 if (unlikely(item->object == NULL)) {
30191 ret = -ENOMEM;
30192@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30193 goto out_err;
30194
30195 }
30196- ++item->refcount;
30197+ atomic_inc(&item->refcount);
30198 ref->object = item->object;
30199 object = item->object;
30200 mutex_unlock(&item->mutex);
30201@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30202 struct drm_global_item *item = &glob[ref->global_type];
30203
30204 mutex_lock(&item->mutex);
30205- BUG_ON(item->refcount == 0);
30206+ BUG_ON(atomic_read(&item->refcount) == 0);
30207 BUG_ON(ref->object != item->object);
30208- if (--item->refcount == 0) {
30209+ if (atomic_dec_and_test(&item->refcount)) {
30210 ref->release(ref);
30211 item->object = NULL;
30212 }
30213diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30214index ab1162d..42587b2 100644
30215--- a/drivers/gpu/drm/drm_info.c
30216+++ b/drivers/gpu/drm/drm_info.c
30217@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30218 struct drm_local_map *map;
30219 struct drm_map_list *r_list;
30220
30221- /* Hardcoded from _DRM_FRAME_BUFFER,
30222- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30223- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30224- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30225+ static const char * const types[] = {
30226+ [_DRM_FRAME_BUFFER] = "FB",
30227+ [_DRM_REGISTERS] = "REG",
30228+ [_DRM_SHM] = "SHM",
30229+ [_DRM_AGP] = "AGP",
30230+ [_DRM_SCATTER_GATHER] = "SG",
30231+ [_DRM_CONSISTENT] = "PCI",
30232+ [_DRM_GEM] = "GEM" };
30233 const char *type;
30234 int i;
30235
30236@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30237 map = r_list->map;
30238 if (!map)
30239 continue;
30240- if (map->type < 0 || map->type > 5)
30241+ if (map->type >= ARRAY_SIZE(types))
30242 type = "??";
30243 else
30244 type = types[map->type];
30245@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30246 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30247 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30248 vma->vm_flags & VM_IO ? 'i' : '-',
30249+#ifdef CONFIG_GRKERNSEC_HIDESYM
30250+ 0);
30251+#else
30252 vma->vm_pgoff);
30253+#endif
30254
30255 #if defined(__i386__)
30256 pgprot = pgprot_val(vma->vm_page_prot);
30257diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30258index 637fcc3..e890b33 100644
30259--- a/drivers/gpu/drm/drm_ioc32.c
30260+++ b/drivers/gpu/drm/drm_ioc32.c
30261@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30262 request = compat_alloc_user_space(nbytes);
30263 if (!access_ok(VERIFY_WRITE, request, nbytes))
30264 return -EFAULT;
30265- list = (struct drm_buf_desc *) (request + 1);
30266+ list = (struct drm_buf_desc __user *) (request + 1);
30267
30268 if (__put_user(count, &request->count)
30269 || __put_user(list, &request->list))
30270@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30271 request = compat_alloc_user_space(nbytes);
30272 if (!access_ok(VERIFY_WRITE, request, nbytes))
30273 return -EFAULT;
30274- list = (struct drm_buf_pub *) (request + 1);
30275+ list = (struct drm_buf_pub __user *) (request + 1);
30276
30277 if (__put_user(count, &request->count)
30278 || __put_user(list, &request->list))
30279diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30280index cf85155..f2665cb 100644
30281--- a/drivers/gpu/drm/drm_ioctl.c
30282+++ b/drivers/gpu/drm/drm_ioctl.c
30283@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30284 stats->data[i].value =
30285 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30286 else
30287- stats->data[i].value = atomic_read(&dev->counts[i]);
30288+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30289 stats->data[i].type = dev->types[i];
30290 }
30291
30292diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30293index c79c713..2048588 100644
30294--- a/drivers/gpu/drm/drm_lock.c
30295+++ b/drivers/gpu/drm/drm_lock.c
30296@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30297 if (drm_lock_take(&master->lock, lock->context)) {
30298 master->lock.file_priv = file_priv;
30299 master->lock.lock_time = jiffies;
30300- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30301+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30302 break; /* Got lock */
30303 }
30304
30305@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30306 return -EINVAL;
30307 }
30308
30309- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30310+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30311
30312 if (drm_lock_free(&master->lock, lock->context)) {
30313 /* FIXME: Should really bail out here. */
30314diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30315index aa454f8..6d38580 100644
30316--- a/drivers/gpu/drm/drm_stub.c
30317+++ b/drivers/gpu/drm/drm_stub.c
30318@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30319
30320 drm_device_set_unplugged(dev);
30321
30322- if (dev->open_count == 0) {
30323+ if (local_read(&dev->open_count) == 0) {
30324 drm_put_dev(dev);
30325 }
30326 mutex_unlock(&drm_global_mutex);
30327diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30328index f920fb5..001c52d 100644
30329--- a/drivers/gpu/drm/i810/i810_dma.c
30330+++ b/drivers/gpu/drm/i810/i810_dma.c
30331@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30332 dma->buflist[vertex->idx],
30333 vertex->discard, vertex->used);
30334
30335- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30336- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30337+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30338+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30339 sarea_priv->last_enqueue = dev_priv->counter - 1;
30340 sarea_priv->last_dispatch = (int)hw_status[5];
30341
30342@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30343 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30344 mc->last_render);
30345
30346- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30347- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30348+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30349+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30350 sarea_priv->last_enqueue = dev_priv->counter - 1;
30351 sarea_priv->last_dispatch = (int)hw_status[5];
30352
30353diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30354index c9339f4..f5e1b9d 100644
30355--- a/drivers/gpu/drm/i810/i810_drv.h
30356+++ b/drivers/gpu/drm/i810/i810_drv.h
30357@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30358 int page_flipping;
30359
30360 wait_queue_head_t irq_queue;
30361- atomic_t irq_received;
30362- atomic_t irq_emitted;
30363+ atomic_unchecked_t irq_received;
30364+ atomic_unchecked_t irq_emitted;
30365
30366 int front_offset;
30367 } drm_i810_private_t;
30368diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30369index e6162a1..b2ff486 100644
30370--- a/drivers/gpu/drm/i915/i915_debugfs.c
30371+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30372@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30373 I915_READ(GTIMR));
30374 }
30375 seq_printf(m, "Interrupts received: %d\n",
30376- atomic_read(&dev_priv->irq_received));
30377+ atomic_read_unchecked(&dev_priv->irq_received));
30378 for (i = 0; i < I915_NUM_RINGS; i++) {
30379 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30380 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30381@@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30382 return ret;
30383
30384 if (opregion->header)
30385- seq_write(m, opregion->header, OPREGION_SIZE);
30386+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30387
30388 mutex_unlock(&dev->struct_mutex);
30389
30390diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30391index ba60f3c..e2dff7f 100644
30392--- a/drivers/gpu/drm/i915/i915_dma.c
30393+++ b/drivers/gpu/drm/i915/i915_dma.c
30394@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30395 bool can_switch;
30396
30397 spin_lock(&dev->count_lock);
30398- can_switch = (dev->open_count == 0);
30399+ can_switch = (local_read(&dev->open_count) == 0);
30400 spin_unlock(&dev->count_lock);
30401 return can_switch;
30402 }
30403diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30404index 5fabc6c..0b08aa1 100644
30405--- a/drivers/gpu/drm/i915/i915_drv.h
30406+++ b/drivers/gpu/drm/i915/i915_drv.h
30407@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30408 /* render clock increase/decrease */
30409 /* display clock increase/decrease */
30410 /* pll clock increase/decrease */
30411-};
30412+} __no_const;
30413
30414 struct intel_device_info {
30415 u8 gen;
30416@@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30417 int current_page;
30418 int page_flipping;
30419
30420- atomic_t irq_received;
30421+ atomic_unchecked_t irq_received;
30422
30423 /* protects the irq masks */
30424 spinlock_t irq_lock;
30425@@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30426 * will be page flipped away on the next vblank. When it
30427 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30428 */
30429- atomic_t pending_flip;
30430+ atomic_unchecked_t pending_flip;
30431 };
30432
30433 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30434@@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30435 extern void intel_teardown_gmbus(struct drm_device *dev);
30436 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30437 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30438-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30439+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30440 {
30441 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30442 }
30443diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30444index de43194..a14c4cc 100644
30445--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30446+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30447@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30448 i915_gem_clflush_object(obj);
30449
30450 if (obj->base.pending_write_domain)
30451- cd->flips |= atomic_read(&obj->pending_flip);
30452+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30453
30454 /* The actual obj->write_domain will be updated with
30455 * pending_write_domain after we emit the accumulated flush for all
30456@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30457
30458 static int
30459 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30460- int count)
30461+ unsigned int count)
30462 {
30463- int i;
30464+ unsigned int i;
30465
30466 for (i = 0; i < count; i++) {
30467 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30468diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30469index 26c67a7..8d4cbcb 100644
30470--- a/drivers/gpu/drm/i915/i915_irq.c
30471+++ b/drivers/gpu/drm/i915/i915_irq.c
30472@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30473 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30474 struct drm_i915_master_private *master_priv;
30475
30476- atomic_inc(&dev_priv->irq_received);
30477+ atomic_inc_unchecked(&dev_priv->irq_received);
30478
30479 /* disable master interrupt before clearing iir */
30480 de_ier = I915_READ(DEIER);
30481@@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30482 struct drm_i915_master_private *master_priv;
30483 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30484
30485- atomic_inc(&dev_priv->irq_received);
30486+ atomic_inc_unchecked(&dev_priv->irq_received);
30487
30488 if (IS_GEN6(dev))
30489 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30490@@ -1291,7 +1291,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30491 int ret = IRQ_NONE, pipe;
30492 bool blc_event = false;
30493
30494- atomic_inc(&dev_priv->irq_received);
30495+ atomic_inc_unchecked(&dev_priv->irq_received);
30496
30497 iir = I915_READ(IIR);
30498
30499@@ -1802,7 +1802,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30500 {
30501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30502
30503- atomic_set(&dev_priv->irq_received, 0);
30504+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30505
30506 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30507 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30508@@ -1979,7 +1979,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30509 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30510 int pipe;
30511
30512- atomic_set(&dev_priv->irq_received, 0);
30513+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30514
30515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30517diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30518index d4d162f..b49a04e 100644
30519--- a/drivers/gpu/drm/i915/intel_display.c
30520+++ b/drivers/gpu/drm/i915/intel_display.c
30521@@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30522
30523 wait_event(dev_priv->pending_flip_queue,
30524 atomic_read(&dev_priv->mm.wedged) ||
30525- atomic_read(&obj->pending_flip) == 0);
30526+ atomic_read_unchecked(&obj->pending_flip) == 0);
30527
30528 /* Big Hammer, we also need to ensure that any pending
30529 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30530@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30531 obj = to_intel_framebuffer(crtc->fb)->obj;
30532 dev_priv = crtc->dev->dev_private;
30533 wait_event(dev_priv->pending_flip_queue,
30534- atomic_read(&obj->pending_flip) == 0);
30535+ atomic_read_unchecked(&obj->pending_flip) == 0);
30536 }
30537
30538 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30539@@ -7284,9 +7284,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30540
30541 obj = work->old_fb_obj;
30542
30543- atomic_clear_mask(1 << intel_crtc->plane,
30544- &obj->pending_flip.counter);
30545- if (atomic_read(&obj->pending_flip) == 0)
30546+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30547+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30548 wake_up(&dev_priv->pending_flip_queue);
30549
30550 schedule_work(&work->work);
30551@@ -7582,7 +7581,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30552 /* Block clients from rendering to the new back buffer until
30553 * the flip occurs and the object is no longer visible.
30554 */
30555- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30556+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30557
30558 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30559 if (ret)
30560@@ -7596,7 +7595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30561 return 0;
30562
30563 cleanup_pending:
30564- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30565+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30566 drm_gem_object_unreference(&work->old_fb_obj->base);
30567 drm_gem_object_unreference(&obj->base);
30568 mutex_unlock(&dev->struct_mutex);
30569diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30570index 54558a0..2d97005 100644
30571--- a/drivers/gpu/drm/mga/mga_drv.h
30572+++ b/drivers/gpu/drm/mga/mga_drv.h
30573@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30574 u32 clear_cmd;
30575 u32 maccess;
30576
30577- atomic_t vbl_received; /**< Number of vblanks received. */
30578+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30579 wait_queue_head_t fence_queue;
30580- atomic_t last_fence_retired;
30581+ atomic_unchecked_t last_fence_retired;
30582 u32 next_fence_to_post;
30583
30584 unsigned int fb_cpp;
30585diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30586index 2581202..f230a8d9 100644
30587--- a/drivers/gpu/drm/mga/mga_irq.c
30588+++ b/drivers/gpu/drm/mga/mga_irq.c
30589@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30590 if (crtc != 0)
30591 return 0;
30592
30593- return atomic_read(&dev_priv->vbl_received);
30594+ return atomic_read_unchecked(&dev_priv->vbl_received);
30595 }
30596
30597
30598@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30599 /* VBLANK interrupt */
30600 if (status & MGA_VLINEPEN) {
30601 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30602- atomic_inc(&dev_priv->vbl_received);
30603+ atomic_inc_unchecked(&dev_priv->vbl_received);
30604 drm_handle_vblank(dev, 0);
30605 handled = 1;
30606 }
30607@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30608 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30609 MGA_WRITE(MGA_PRIMEND, prim_end);
30610
30611- atomic_inc(&dev_priv->last_fence_retired);
30612+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30613 DRM_WAKEUP(&dev_priv->fence_queue);
30614 handled = 1;
30615 }
30616@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30617 * using fences.
30618 */
30619 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30620- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30621+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30622 - *sequence) <= (1 << 23)));
30623
30624 *sequence = cur_fence;
30625diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30626index 0be4a81..7464804 100644
30627--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30628+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30629@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30630 struct bit_table {
30631 const char id;
30632 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30633-};
30634+} __no_const;
30635
30636 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30637
30638diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30639index 3aef353..0ad1322 100644
30640--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30641+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30642@@ -240,7 +240,7 @@ struct nouveau_channel {
30643 struct list_head pending;
30644 uint32_t sequence;
30645 uint32_t sequence_ack;
30646- atomic_t last_sequence_irq;
30647+ atomic_unchecked_t last_sequence_irq;
30648 struct nouveau_vma vma;
30649 } fence;
30650
30651@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30652 u32 handle, u16 class);
30653 void (*set_tile_region)(struct drm_device *dev, int i);
30654 void (*tlb_flush)(struct drm_device *, int engine);
30655-};
30656+} __no_const;
30657
30658 struct nouveau_instmem_engine {
30659 void *priv;
30660@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30661 struct nouveau_mc_engine {
30662 int (*init)(struct drm_device *dev);
30663 void (*takedown)(struct drm_device *dev);
30664-};
30665+} __no_const;
30666
30667 struct nouveau_timer_engine {
30668 int (*init)(struct drm_device *dev);
30669 void (*takedown)(struct drm_device *dev);
30670 uint64_t (*read)(struct drm_device *dev);
30671-};
30672+} __no_const;
30673
30674 struct nouveau_fb_engine {
30675 int num_tiles;
30676@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30677 void (*put)(struct drm_device *, struct nouveau_mem **);
30678
30679 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30680-};
30681+} __no_const;
30682
30683 struct nouveau_engine {
30684 struct nouveau_instmem_engine instmem;
30685@@ -739,7 +739,7 @@ struct drm_nouveau_private {
30686 struct drm_global_reference mem_global_ref;
30687 struct ttm_bo_global_ref bo_global_ref;
30688 struct ttm_bo_device bdev;
30689- atomic_t validate_sequence;
30690+ atomic_unchecked_t validate_sequence;
30691 } ttm;
30692
30693 struct {
30694diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30695index c1dc20f..4df673c 100644
30696--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30697+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30698@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30699 if (USE_REFCNT(dev))
30700 sequence = nvchan_rd32(chan, 0x48);
30701 else
30702- sequence = atomic_read(&chan->fence.last_sequence_irq);
30703+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30704
30705 if (chan->fence.sequence_ack == sequence)
30706 goto out;
30707@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30708 return ret;
30709 }
30710
30711- atomic_set(&chan->fence.last_sequence_irq, 0);
30712+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30713 return 0;
30714 }
30715
30716diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30717index ed52a6f..484acdc 100644
30718--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30719+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30720@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30721 int trycnt = 0;
30722 int ret, i;
30723
30724- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30725+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30726 retry:
30727 if (++trycnt > 100000) {
30728 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30729diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30730index c2a8511..4b996f9 100644
30731--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30732+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30733@@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30734 bool can_switch;
30735
30736 spin_lock(&dev->count_lock);
30737- can_switch = (dev->open_count == 0);
30738+ can_switch = (local_read(&dev->open_count) == 0);
30739 spin_unlock(&dev->count_lock);
30740 return can_switch;
30741 }
30742diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30743index dbdea8e..cd6eeeb 100644
30744--- a/drivers/gpu/drm/nouveau/nv04_graph.c
30745+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30746@@ -554,7 +554,7 @@ static int
30747 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30748 u32 class, u32 mthd, u32 data)
30749 {
30750- atomic_set(&chan->fence.last_sequence_irq, data);
30751+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30752 return 0;
30753 }
30754
30755diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30756index 2746402..c8dc4a4 100644
30757--- a/drivers/gpu/drm/nouveau/nv50_sor.c
30758+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30759@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30760 }
30761
30762 if (nv_encoder->dcb->type == OUTPUT_DP) {
30763- struct dp_train_func func = {
30764+ static struct dp_train_func func = {
30765 .link_set = nv50_sor_dp_link_set,
30766 .train_set = nv50_sor_dp_train_set,
30767 .train_adj = nv50_sor_dp_train_adj
30768diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30769index 0247250..d2f6aaf 100644
30770--- a/drivers/gpu/drm/nouveau/nvd0_display.c
30771+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30772@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30773 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30774
30775 if (nv_encoder->dcb->type == OUTPUT_DP) {
30776- struct dp_train_func func = {
30777+ static struct dp_train_func func = {
30778 .link_set = nvd0_sor_dp_link_set,
30779 .train_set = nvd0_sor_dp_train_set,
30780 .train_adj = nvd0_sor_dp_train_adj
30781diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30782index bcac90b..53bfc76 100644
30783--- a/drivers/gpu/drm/r128/r128_cce.c
30784+++ b/drivers/gpu/drm/r128/r128_cce.c
30785@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30786
30787 /* GH: Simple idle check.
30788 */
30789- atomic_set(&dev_priv->idle_count, 0);
30790+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30791
30792 /* We don't support anything other than bus-mastering ring mode,
30793 * but the ring can be in either AGP or PCI space for the ring
30794diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30795index 930c71b..499aded 100644
30796--- a/drivers/gpu/drm/r128/r128_drv.h
30797+++ b/drivers/gpu/drm/r128/r128_drv.h
30798@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30799 int is_pci;
30800 unsigned long cce_buffers_offset;
30801
30802- atomic_t idle_count;
30803+ atomic_unchecked_t idle_count;
30804
30805 int page_flipping;
30806 int current_page;
30807 u32 crtc_offset;
30808 u32 crtc_offset_cntl;
30809
30810- atomic_t vbl_received;
30811+ atomic_unchecked_t vbl_received;
30812
30813 u32 color_fmt;
30814 unsigned int front_offset;
30815diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30816index 429d5a0..7e899ed 100644
30817--- a/drivers/gpu/drm/r128/r128_irq.c
30818+++ b/drivers/gpu/drm/r128/r128_irq.c
30819@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30820 if (crtc != 0)
30821 return 0;
30822
30823- return atomic_read(&dev_priv->vbl_received);
30824+ return atomic_read_unchecked(&dev_priv->vbl_received);
30825 }
30826
30827 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30828@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30829 /* VBLANK interrupt */
30830 if (status & R128_CRTC_VBLANK_INT) {
30831 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30832- atomic_inc(&dev_priv->vbl_received);
30833+ atomic_inc_unchecked(&dev_priv->vbl_received);
30834 drm_handle_vblank(dev, 0);
30835 return IRQ_HANDLED;
30836 }
30837diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30838index a9e33ce..09edd4b 100644
30839--- a/drivers/gpu/drm/r128/r128_state.c
30840+++ b/drivers/gpu/drm/r128/r128_state.c
30841@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30842
30843 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30844 {
30845- if (atomic_read(&dev_priv->idle_count) == 0)
30846+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30847 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30848 else
30849- atomic_set(&dev_priv->idle_count, 0);
30850+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30851 }
30852
30853 #endif
30854diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30855index 5a82b6b..9e69c73 100644
30856--- a/drivers/gpu/drm/radeon/mkregtable.c
30857+++ b/drivers/gpu/drm/radeon/mkregtable.c
30858@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30859 regex_t mask_rex;
30860 regmatch_t match[4];
30861 char buf[1024];
30862- size_t end;
30863+ long end;
30864 int len;
30865 int done = 0;
30866 int r;
30867 unsigned o;
30868 struct offset *offset;
30869 char last_reg_s[10];
30870- int last_reg;
30871+ unsigned long last_reg;
30872
30873 if (regcomp
30874 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30875diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30876index 138b952..d74f9cb 100644
30877--- a/drivers/gpu/drm/radeon/radeon.h
30878+++ b/drivers/gpu/drm/radeon/radeon.h
30879@@ -253,7 +253,7 @@ struct radeon_fence_driver {
30880 uint32_t scratch_reg;
30881 uint64_t gpu_addr;
30882 volatile uint32_t *cpu_addr;
30883- atomic_t seq;
30884+ atomic_unchecked_t seq;
30885 uint32_t last_seq;
30886 unsigned long last_jiffies;
30887 unsigned long last_timeout;
30888@@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30889 int x2, int y2);
30890 void (*draw_auto)(struct radeon_device *rdev);
30891 void (*set_default_state)(struct radeon_device *rdev);
30892-};
30893+} __no_const;
30894
30895 struct r600_blit {
30896 struct mutex mutex;
30897@@ -1246,7 +1246,7 @@ struct radeon_asic {
30898 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30899 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30900 } pflip;
30901-};
30902+} __no_const;
30903
30904 /*
30905 * Asic structures
30906diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30907index 5992502..c19c633 100644
30908--- a/drivers/gpu/drm/radeon/radeon_device.c
30909+++ b/drivers/gpu/drm/radeon/radeon_device.c
30910@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30911 bool can_switch;
30912
30913 spin_lock(&dev->count_lock);
30914- can_switch = (dev->open_count == 0);
30915+ can_switch = (local_read(&dev->open_count) == 0);
30916 spin_unlock(&dev->count_lock);
30917 return can_switch;
30918 }
30919diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30920index a1b59ca..86f2d44 100644
30921--- a/drivers/gpu/drm/radeon/radeon_drv.h
30922+++ b/drivers/gpu/drm/radeon/radeon_drv.h
30923@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30924
30925 /* SW interrupt */
30926 wait_queue_head_t swi_queue;
30927- atomic_t swi_emitted;
30928+ atomic_unchecked_t swi_emitted;
30929 int vblank_crtc;
30930 uint32_t irq_enable_reg;
30931 uint32_t r500_disp_irq_reg;
30932diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30933index 4bd36a3..e66fe9c 100644
30934--- a/drivers/gpu/drm/radeon/radeon_fence.c
30935+++ b/drivers/gpu/drm/radeon/radeon_fence.c
30936@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30937 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30938 return 0;
30939 }
30940- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30941+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30942 if (!rdev->ring[fence->ring].ready)
30943 /* FIXME: cp is not running assume everythings is done right
30944 * away
30945@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30946 }
30947 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30948 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30949- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30950+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30951 rdev->fence_drv[ring].initialized = true;
30952 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30953 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30954@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30955 rdev->fence_drv[ring].scratch_reg = -1;
30956 rdev->fence_drv[ring].cpu_addr = NULL;
30957 rdev->fence_drv[ring].gpu_addr = 0;
30958- atomic_set(&rdev->fence_drv[ring].seq, 0);
30959+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30960 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30961 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30962 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30963diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30964index 48b7cea..342236f 100644
30965--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30966+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30967@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30968 request = compat_alloc_user_space(sizeof(*request));
30969 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30970 || __put_user(req32.param, &request->param)
30971- || __put_user((void __user *)(unsigned long)req32.value,
30972+ || __put_user((unsigned long)req32.value,
30973 &request->value))
30974 return -EFAULT;
30975
30976diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30977index 00da384..32f972d 100644
30978--- a/drivers/gpu/drm/radeon/radeon_irq.c
30979+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30980@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30981 unsigned int ret;
30982 RING_LOCALS;
30983
30984- atomic_inc(&dev_priv->swi_emitted);
30985- ret = atomic_read(&dev_priv->swi_emitted);
30986+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30987+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30988
30989 BEGIN_RING(4);
30990 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30991@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30992 drm_radeon_private_t *dev_priv =
30993 (drm_radeon_private_t *) dev->dev_private;
30994
30995- atomic_set(&dev_priv->swi_emitted, 0);
30996+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30997 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30998
30999 dev->max_vblank_count = 0x001fffff;
31000diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31001index e8422ae..d22d4a8 100644
31002--- a/drivers/gpu/drm/radeon/radeon_state.c
31003+++ b/drivers/gpu/drm/radeon/radeon_state.c
31004@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31005 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31006 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31007
31008- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31009+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31010 sarea_priv->nbox * sizeof(depth_boxes[0])))
31011 return -EFAULT;
31012
31013@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31014 {
31015 drm_radeon_private_t *dev_priv = dev->dev_private;
31016 drm_radeon_getparam_t *param = data;
31017- int value;
31018+ int value = 0;
31019
31020 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31021
31022diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31023index f493c64..524ab6b 100644
31024--- a/drivers/gpu/drm/radeon/radeon_ttm.c
31025+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31026@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31027 }
31028 if (unlikely(ttm_vm_ops == NULL)) {
31029 ttm_vm_ops = vma->vm_ops;
31030- radeon_ttm_vm_ops = *ttm_vm_ops;
31031- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31032+ pax_open_kernel();
31033+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31034+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31035+ pax_close_kernel();
31036 }
31037 vma->vm_ops = &radeon_ttm_vm_ops;
31038 return 0;
31039diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31040index f2c3b9d..d5a376b 100644
31041--- a/drivers/gpu/drm/radeon/rs690.c
31042+++ b/drivers/gpu/drm/radeon/rs690.c
31043@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31044 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31045 rdev->pm.sideport_bandwidth.full)
31046 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31047- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31048+ read_delay_latency.full = dfixed_const(800 * 1000);
31049 read_delay_latency.full = dfixed_div(read_delay_latency,
31050 rdev->pm.igp_sideport_mclk);
31051+ a.full = dfixed_const(370);
31052+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31053 } else {
31054 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31055 rdev->pm.k8_bandwidth.full)
31056diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31057index ebc6fac..a8313ed 100644
31058--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31059+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31060@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31061 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31062 struct shrink_control *sc)
31063 {
31064- static atomic_t start_pool = ATOMIC_INIT(0);
31065+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31066 unsigned i;
31067- unsigned pool_offset = atomic_add_return(1, &start_pool);
31068+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31069 struct ttm_page_pool *pool;
31070 int shrink_pages = sc->nr_to_scan;
31071
31072diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31073index 88edacc..1e5412b 100644
31074--- a/drivers/gpu/drm/via/via_drv.h
31075+++ b/drivers/gpu/drm/via/via_drv.h
31076@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31077 typedef uint32_t maskarray_t[5];
31078
31079 typedef struct drm_via_irq {
31080- atomic_t irq_received;
31081+ atomic_unchecked_t irq_received;
31082 uint32_t pending_mask;
31083 uint32_t enable_mask;
31084 wait_queue_head_t irq_queue;
31085@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31086 struct timeval last_vblank;
31087 int last_vblank_valid;
31088 unsigned usec_per_vblank;
31089- atomic_t vbl_received;
31090+ atomic_unchecked_t vbl_received;
31091 drm_via_state_t hc_state;
31092 char pci_buf[VIA_PCI_BUF_SIZE];
31093 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31094diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31095index d391f48..10c8ca3 100644
31096--- a/drivers/gpu/drm/via/via_irq.c
31097+++ b/drivers/gpu/drm/via/via_irq.c
31098@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31099 if (crtc != 0)
31100 return 0;
31101
31102- return atomic_read(&dev_priv->vbl_received);
31103+ return atomic_read_unchecked(&dev_priv->vbl_received);
31104 }
31105
31106 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31107@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31108
31109 status = VIA_READ(VIA_REG_INTERRUPT);
31110 if (status & VIA_IRQ_VBLANK_PENDING) {
31111- atomic_inc(&dev_priv->vbl_received);
31112- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31113+ atomic_inc_unchecked(&dev_priv->vbl_received);
31114+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31115 do_gettimeofday(&cur_vblank);
31116 if (dev_priv->last_vblank_valid) {
31117 dev_priv->usec_per_vblank =
31118@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31119 dev_priv->last_vblank = cur_vblank;
31120 dev_priv->last_vblank_valid = 1;
31121 }
31122- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31123+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31124 DRM_DEBUG("US per vblank is: %u\n",
31125 dev_priv->usec_per_vblank);
31126 }
31127@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31128
31129 for (i = 0; i < dev_priv->num_irqs; ++i) {
31130 if (status & cur_irq->pending_mask) {
31131- atomic_inc(&cur_irq->irq_received);
31132+ atomic_inc_unchecked(&cur_irq->irq_received);
31133 DRM_WAKEUP(&cur_irq->irq_queue);
31134 handled = 1;
31135 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31136@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31137 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31138 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31139 masks[irq][4]));
31140- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31141+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31142 } else {
31143 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31144 (((cur_irq_sequence =
31145- atomic_read(&cur_irq->irq_received)) -
31146+ atomic_read_unchecked(&cur_irq->irq_received)) -
31147 *sequence) <= (1 << 23)));
31148 }
31149 *sequence = cur_irq_sequence;
31150@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31151 }
31152
31153 for (i = 0; i < dev_priv->num_irqs; ++i) {
31154- atomic_set(&cur_irq->irq_received, 0);
31155+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31156 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31157 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31158 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31159@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31160 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31161 case VIA_IRQ_RELATIVE:
31162 irqwait->request.sequence +=
31163- atomic_read(&cur_irq->irq_received);
31164+ atomic_read_unchecked(&cur_irq->irq_received);
31165 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31166 case VIA_IRQ_ABSOLUTE:
31167 break;
31168diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31169index d0f2c07..9ebd9c3 100644
31170--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31171+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31172@@ -263,7 +263,7 @@ struct vmw_private {
31173 * Fencing and IRQs.
31174 */
31175
31176- atomic_t marker_seq;
31177+ atomic_unchecked_t marker_seq;
31178 wait_queue_head_t fence_queue;
31179 wait_queue_head_t fifo_queue;
31180 int fence_queue_waiters; /* Protected by hw_mutex */
31181diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31182index a0c2f12..68ae6cb 100644
31183--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31184+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31185@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31186 (unsigned int) min,
31187 (unsigned int) fifo->capabilities);
31188
31189- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31190+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31191 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31192 vmw_marker_queue_init(&fifo->marker_queue);
31193 return vmw_fifo_send_fence(dev_priv, &dummy);
31194@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31195 if (reserveable)
31196 iowrite32(bytes, fifo_mem +
31197 SVGA_FIFO_RESERVED);
31198- return fifo_mem + (next_cmd >> 2);
31199+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31200 } else {
31201 need_bounce = true;
31202 }
31203@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31204
31205 fm = vmw_fifo_reserve(dev_priv, bytes);
31206 if (unlikely(fm == NULL)) {
31207- *seqno = atomic_read(&dev_priv->marker_seq);
31208+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31209 ret = -ENOMEM;
31210 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31211 false, 3*HZ);
31212@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31213 }
31214
31215 do {
31216- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31217+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31218 } while (*seqno == 0);
31219
31220 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31221diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31222index cabc95f..14b3d77 100644
31223--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31224+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31225@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31226 * emitted. Then the fence is stale and signaled.
31227 */
31228
31229- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31230+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31231 > VMW_FENCE_WRAP);
31232
31233 return ret;
31234@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31235
31236 if (fifo_idle)
31237 down_read(&fifo_state->rwsem);
31238- signal_seq = atomic_read(&dev_priv->marker_seq);
31239+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31240 ret = 0;
31241
31242 for (;;) {
31243diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31244index 8a8725c..afed796 100644
31245--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31246+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31247@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31248 while (!vmw_lag_lt(queue, us)) {
31249 spin_lock(&queue->lock);
31250 if (list_empty(&queue->head))
31251- seqno = atomic_read(&dev_priv->marker_seq);
31252+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31253 else {
31254 marker = list_first_entry(&queue->head,
31255 struct vmw_marker, head);
31256diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31257index 054677b..741672a 100644
31258--- a/drivers/hid/hid-core.c
31259+++ b/drivers/hid/hid-core.c
31260@@ -2070,7 +2070,7 @@ static bool hid_ignore(struct hid_device *hdev)
31261
31262 int hid_add_device(struct hid_device *hdev)
31263 {
31264- static atomic_t id = ATOMIC_INIT(0);
31265+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31266 int ret;
31267
31268 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31269@@ -2085,7 +2085,7 @@ int hid_add_device(struct hid_device *hdev)
31270 /* XXX hack, any other cleaner solution after the driver core
31271 * is converted to allow more than 20 bytes as the device name? */
31272 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31273- hdev->vendor, hdev->product, atomic_inc_return(&id));
31274+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31275
31276 hid_debug_register(hdev, dev_name(&hdev->dev));
31277 ret = device_add(&hdev->dev);
31278diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31279index eec3291..8ed706b 100644
31280--- a/drivers/hid/hid-wiimote-debug.c
31281+++ b/drivers/hid/hid-wiimote-debug.c
31282@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31283 else if (size == 0)
31284 return -EIO;
31285
31286- if (copy_to_user(u, buf, size))
31287+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
31288 return -EFAULT;
31289
31290 *off += size;
31291diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31292index b1ec0e2..c295a61 100644
31293--- a/drivers/hid/usbhid/hiddev.c
31294+++ b/drivers/hid/usbhid/hiddev.c
31295@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31296 break;
31297
31298 case HIDIOCAPPLICATION:
31299- if (arg < 0 || arg >= hid->maxapplication)
31300+ if (arg >= hid->maxapplication)
31301 break;
31302
31303 for (i = 0; i < hid->maxcollection; i++)
31304diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31305index 4065374..10ed7dc 100644
31306--- a/drivers/hv/channel.c
31307+++ b/drivers/hv/channel.c
31308@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31309 int ret = 0;
31310 int t;
31311
31312- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31313- atomic_inc(&vmbus_connection.next_gpadl_handle);
31314+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31315+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31316
31317 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31318 if (ret)
31319diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31320index 15956bd..ea34398 100644
31321--- a/drivers/hv/hv.c
31322+++ b/drivers/hv/hv.c
31323@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31324 u64 output_address = (output) ? virt_to_phys(output) : 0;
31325 u32 output_address_hi = output_address >> 32;
31326 u32 output_address_lo = output_address & 0xFFFFFFFF;
31327- void *hypercall_page = hv_context.hypercall_page;
31328+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31329
31330 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31331 "=a"(hv_status_lo) : "d" (control_hi),
31332diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31333index 699f0d8..f4f19250 100644
31334--- a/drivers/hv/hyperv_vmbus.h
31335+++ b/drivers/hv/hyperv_vmbus.h
31336@@ -555,7 +555,7 @@ enum vmbus_connect_state {
31337 struct vmbus_connection {
31338 enum vmbus_connect_state conn_state;
31339
31340- atomic_t next_gpadl_handle;
31341+ atomic_unchecked_t next_gpadl_handle;
31342
31343 /*
31344 * Represents channel interrupts. Each bit position represents a
31345diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31346index a220e57..428f54d 100644
31347--- a/drivers/hv/vmbus_drv.c
31348+++ b/drivers/hv/vmbus_drv.c
31349@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31350 {
31351 int ret = 0;
31352
31353- static atomic_t device_num = ATOMIC_INIT(0);
31354+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31355
31356 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31357- atomic_inc_return(&device_num));
31358+ atomic_inc_return_unchecked(&device_num));
31359
31360 child_device_obj->device.bus = &hv_bus;
31361 child_device_obj->device.parent = &hv_acpi_dev->dev;
31362diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31363index 9140236..ceaef4e 100644
31364--- a/drivers/hwmon/acpi_power_meter.c
31365+++ b/drivers/hwmon/acpi_power_meter.c
31366@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31367 return res;
31368
31369 temp /= 1000;
31370- if (temp < 0)
31371- return -EINVAL;
31372
31373 mutex_lock(&resource->lock);
31374 resource->trip[attr->index - 7] = temp;
31375diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31376index 8b011d0..3de24a1 100644
31377--- a/drivers/hwmon/sht15.c
31378+++ b/drivers/hwmon/sht15.c
31379@@ -166,7 +166,7 @@ struct sht15_data {
31380 int supply_uV;
31381 bool supply_uV_valid;
31382 struct work_struct update_supply_work;
31383- atomic_t interrupt_handled;
31384+ atomic_unchecked_t interrupt_handled;
31385 };
31386
31387 /**
31388@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31389 return ret;
31390
31391 gpio_direction_input(data->pdata->gpio_data);
31392- atomic_set(&data->interrupt_handled, 0);
31393+ atomic_set_unchecked(&data->interrupt_handled, 0);
31394
31395 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31396 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31397 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31398 /* Only relevant if the interrupt hasn't occurred. */
31399- if (!atomic_read(&data->interrupt_handled))
31400+ if (!atomic_read_unchecked(&data->interrupt_handled))
31401 schedule_work(&data->read_work);
31402 }
31403 ret = wait_event_timeout(data->wait_queue,
31404@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31405
31406 /* First disable the interrupt */
31407 disable_irq_nosync(irq);
31408- atomic_inc(&data->interrupt_handled);
31409+ atomic_inc_unchecked(&data->interrupt_handled);
31410 /* Then schedule a reading work struct */
31411 if (data->state != SHT15_READING_NOTHING)
31412 schedule_work(&data->read_work);
31413@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31414 * If not, then start the interrupt again - care here as could
31415 * have gone low in meantime so verify it hasn't!
31416 */
31417- atomic_set(&data->interrupt_handled, 0);
31418+ atomic_set_unchecked(&data->interrupt_handled, 0);
31419 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31420 /* If still not occurred or another handler was scheduled */
31421 if (gpio_get_value(data->pdata->gpio_data)
31422- || atomic_read(&data->interrupt_handled))
31423+ || atomic_read_unchecked(&data->interrupt_handled))
31424 return;
31425 }
31426
31427diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31428index 378fcb5..5e91fa8 100644
31429--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31430+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31431@@ -43,7 +43,7 @@
31432 extern struct i2c_adapter amd756_smbus;
31433
31434 static struct i2c_adapter *s4882_adapter;
31435-static struct i2c_algorithm *s4882_algo;
31436+static i2c_algorithm_no_const *s4882_algo;
31437
31438 /* Wrapper access functions for multiplexed SMBus */
31439 static DEFINE_MUTEX(amd756_lock);
31440diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31441index 29015eb..af2d8e9 100644
31442--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31443+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31444@@ -41,7 +41,7 @@
31445 extern struct i2c_adapter *nforce2_smbus;
31446
31447 static struct i2c_adapter *s4985_adapter;
31448-static struct i2c_algorithm *s4985_algo;
31449+static i2c_algorithm_no_const *s4985_algo;
31450
31451 /* Wrapper access functions for multiplexed SMBus */
31452 static DEFINE_MUTEX(nforce2_lock);
31453diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31454index d7a4833..7fae376 100644
31455--- a/drivers/i2c/i2c-mux.c
31456+++ b/drivers/i2c/i2c-mux.c
31457@@ -28,7 +28,7 @@
31458 /* multiplexer per channel data */
31459 struct i2c_mux_priv {
31460 struct i2c_adapter adap;
31461- struct i2c_algorithm algo;
31462+ i2c_algorithm_no_const algo;
31463
31464 struct i2c_adapter *parent;
31465 void *mux_dev; /* the mux chip/device */
31466diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31467index 57d00ca..0145194 100644
31468--- a/drivers/ide/aec62xx.c
31469+++ b/drivers/ide/aec62xx.c
31470@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31471 .cable_detect = atp86x_cable_detect,
31472 };
31473
31474-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31475+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31476 { /* 0: AEC6210 */
31477 .name = DRV_NAME,
31478 .init_chipset = init_chipset_aec62xx,
31479diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31480index 2c8016a..911a27c 100644
31481--- a/drivers/ide/alim15x3.c
31482+++ b/drivers/ide/alim15x3.c
31483@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31484 .dma_sff_read_status = ide_dma_sff_read_status,
31485 };
31486
31487-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31488+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31489 .name = DRV_NAME,
31490 .init_chipset = init_chipset_ali15x3,
31491 .init_hwif = init_hwif_ali15x3,
31492diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31493index 3747b25..56fc995 100644
31494--- a/drivers/ide/amd74xx.c
31495+++ b/drivers/ide/amd74xx.c
31496@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31497 .udma_mask = udma, \
31498 }
31499
31500-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31501+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31502 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31503 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31504 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31505diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31506index 15f0ead..cb43480 100644
31507--- a/drivers/ide/atiixp.c
31508+++ b/drivers/ide/atiixp.c
31509@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31510 .cable_detect = atiixp_cable_detect,
31511 };
31512
31513-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31514+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31515 { /* 0: IXP200/300/400/700 */
31516 .name = DRV_NAME,
31517 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31518diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31519index 5f80312..d1fc438 100644
31520--- a/drivers/ide/cmd64x.c
31521+++ b/drivers/ide/cmd64x.c
31522@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31523 .dma_sff_read_status = ide_dma_sff_read_status,
31524 };
31525
31526-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31527+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31528 { /* 0: CMD643 */
31529 .name = DRV_NAME,
31530 .init_chipset = init_chipset_cmd64x,
31531diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31532index 2c1e5f7..1444762 100644
31533--- a/drivers/ide/cs5520.c
31534+++ b/drivers/ide/cs5520.c
31535@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31536 .set_dma_mode = cs5520_set_dma_mode,
31537 };
31538
31539-static const struct ide_port_info cyrix_chipset __devinitdata = {
31540+static const struct ide_port_info cyrix_chipset __devinitconst = {
31541 .name = DRV_NAME,
31542 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31543 .port_ops = &cs5520_port_ops,
31544diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31545index 4dc4eb9..49b40ad 100644
31546--- a/drivers/ide/cs5530.c
31547+++ b/drivers/ide/cs5530.c
31548@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31549 .udma_filter = cs5530_udma_filter,
31550 };
31551
31552-static const struct ide_port_info cs5530_chipset __devinitdata = {
31553+static const struct ide_port_info cs5530_chipset __devinitconst = {
31554 .name = DRV_NAME,
31555 .init_chipset = init_chipset_cs5530,
31556 .init_hwif = init_hwif_cs5530,
31557diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31558index 5059faf..18d4c85 100644
31559--- a/drivers/ide/cs5535.c
31560+++ b/drivers/ide/cs5535.c
31561@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31562 .cable_detect = cs5535_cable_detect,
31563 };
31564
31565-static const struct ide_port_info cs5535_chipset __devinitdata = {
31566+static const struct ide_port_info cs5535_chipset __devinitconst = {
31567 .name = DRV_NAME,
31568 .port_ops = &cs5535_port_ops,
31569 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31570diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31571index 847553f..3ffb49d 100644
31572--- a/drivers/ide/cy82c693.c
31573+++ b/drivers/ide/cy82c693.c
31574@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31575 .set_dma_mode = cy82c693_set_dma_mode,
31576 };
31577
31578-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31579+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31580 .name = DRV_NAME,
31581 .init_iops = init_iops_cy82c693,
31582 .port_ops = &cy82c693_port_ops,
31583diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31584index 58c51cd..4aec3b8 100644
31585--- a/drivers/ide/hpt366.c
31586+++ b/drivers/ide/hpt366.c
31587@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31588 }
31589 };
31590
31591-static const struct hpt_info hpt36x __devinitdata = {
31592+static const struct hpt_info hpt36x __devinitconst = {
31593 .chip_name = "HPT36x",
31594 .chip_type = HPT36x,
31595 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31596@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31597 .timings = &hpt36x_timings
31598 };
31599
31600-static const struct hpt_info hpt370 __devinitdata = {
31601+static const struct hpt_info hpt370 __devinitconst = {
31602 .chip_name = "HPT370",
31603 .chip_type = HPT370,
31604 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31605@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31606 .timings = &hpt37x_timings
31607 };
31608
31609-static const struct hpt_info hpt370a __devinitdata = {
31610+static const struct hpt_info hpt370a __devinitconst = {
31611 .chip_name = "HPT370A",
31612 .chip_type = HPT370A,
31613 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31614@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31615 .timings = &hpt37x_timings
31616 };
31617
31618-static const struct hpt_info hpt374 __devinitdata = {
31619+static const struct hpt_info hpt374 __devinitconst = {
31620 .chip_name = "HPT374",
31621 .chip_type = HPT374,
31622 .udma_mask = ATA_UDMA5,
31623@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31624 .timings = &hpt37x_timings
31625 };
31626
31627-static const struct hpt_info hpt372 __devinitdata = {
31628+static const struct hpt_info hpt372 __devinitconst = {
31629 .chip_name = "HPT372",
31630 .chip_type = HPT372,
31631 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31632@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31633 .timings = &hpt37x_timings
31634 };
31635
31636-static const struct hpt_info hpt372a __devinitdata = {
31637+static const struct hpt_info hpt372a __devinitconst = {
31638 .chip_name = "HPT372A",
31639 .chip_type = HPT372A,
31640 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31641@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31642 .timings = &hpt37x_timings
31643 };
31644
31645-static const struct hpt_info hpt302 __devinitdata = {
31646+static const struct hpt_info hpt302 __devinitconst = {
31647 .chip_name = "HPT302",
31648 .chip_type = HPT302,
31649 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31650@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31651 .timings = &hpt37x_timings
31652 };
31653
31654-static const struct hpt_info hpt371 __devinitdata = {
31655+static const struct hpt_info hpt371 __devinitconst = {
31656 .chip_name = "HPT371",
31657 .chip_type = HPT371,
31658 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31659@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31660 .timings = &hpt37x_timings
31661 };
31662
31663-static const struct hpt_info hpt372n __devinitdata = {
31664+static const struct hpt_info hpt372n __devinitconst = {
31665 .chip_name = "HPT372N",
31666 .chip_type = HPT372N,
31667 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31668@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31669 .timings = &hpt37x_timings
31670 };
31671
31672-static const struct hpt_info hpt302n __devinitdata = {
31673+static const struct hpt_info hpt302n __devinitconst = {
31674 .chip_name = "HPT302N",
31675 .chip_type = HPT302N,
31676 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31677@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31678 .timings = &hpt37x_timings
31679 };
31680
31681-static const struct hpt_info hpt371n __devinitdata = {
31682+static const struct hpt_info hpt371n __devinitconst = {
31683 .chip_name = "HPT371N",
31684 .chip_type = HPT371N,
31685 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31686@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31687 .dma_sff_read_status = ide_dma_sff_read_status,
31688 };
31689
31690-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31691+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31692 { /* 0: HPT36x */
31693 .name = DRV_NAME,
31694 .init_chipset = init_chipset_hpt366,
31695diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31696index 8126824..55a2798 100644
31697--- a/drivers/ide/ide-cd.c
31698+++ b/drivers/ide/ide-cd.c
31699@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31700 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31701 if ((unsigned long)buf & alignment
31702 || blk_rq_bytes(rq) & q->dma_pad_mask
31703- || object_is_on_stack(buf))
31704+ || object_starts_on_stack(buf))
31705 drive->dma = 0;
31706 }
31707 }
31708diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31709index 7f56b73..dab5b67 100644
31710--- a/drivers/ide/ide-pci-generic.c
31711+++ b/drivers/ide/ide-pci-generic.c
31712@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31713 .udma_mask = ATA_UDMA6, \
31714 }
31715
31716-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31717+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31718 /* 0: Unknown */
31719 DECLARE_GENERIC_PCI_DEV(0),
31720
31721diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31722index 560e66d..d5dd180 100644
31723--- a/drivers/ide/it8172.c
31724+++ b/drivers/ide/it8172.c
31725@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31726 .set_dma_mode = it8172_set_dma_mode,
31727 };
31728
31729-static const struct ide_port_info it8172_port_info __devinitdata = {
31730+static const struct ide_port_info it8172_port_info __devinitconst = {
31731 .name = DRV_NAME,
31732 .port_ops = &it8172_port_ops,
31733 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31734diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31735index 46816ba..1847aeb 100644
31736--- a/drivers/ide/it8213.c
31737+++ b/drivers/ide/it8213.c
31738@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31739 .cable_detect = it8213_cable_detect,
31740 };
31741
31742-static const struct ide_port_info it8213_chipset __devinitdata = {
31743+static const struct ide_port_info it8213_chipset __devinitconst = {
31744 .name = DRV_NAME,
31745 .enablebits = { {0x41, 0x80, 0x80} },
31746 .port_ops = &it8213_port_ops,
31747diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31748index 2e3169f..c5611db 100644
31749--- a/drivers/ide/it821x.c
31750+++ b/drivers/ide/it821x.c
31751@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31752 .cable_detect = it821x_cable_detect,
31753 };
31754
31755-static const struct ide_port_info it821x_chipset __devinitdata = {
31756+static const struct ide_port_info it821x_chipset __devinitconst = {
31757 .name = DRV_NAME,
31758 .init_chipset = init_chipset_it821x,
31759 .init_hwif = init_hwif_it821x,
31760diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31761index 74c2c4a..efddd7d 100644
31762--- a/drivers/ide/jmicron.c
31763+++ b/drivers/ide/jmicron.c
31764@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31765 .cable_detect = jmicron_cable_detect,
31766 };
31767
31768-static const struct ide_port_info jmicron_chipset __devinitdata = {
31769+static const struct ide_port_info jmicron_chipset __devinitconst = {
31770 .name = DRV_NAME,
31771 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31772 .port_ops = &jmicron_port_ops,
31773diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31774index 95327a2..73f78d8 100644
31775--- a/drivers/ide/ns87415.c
31776+++ b/drivers/ide/ns87415.c
31777@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31778 .dma_sff_read_status = superio_dma_sff_read_status,
31779 };
31780
31781-static const struct ide_port_info ns87415_chipset __devinitdata = {
31782+static const struct ide_port_info ns87415_chipset __devinitconst = {
31783 .name = DRV_NAME,
31784 .init_hwif = init_hwif_ns87415,
31785 .tp_ops = &ns87415_tp_ops,
31786diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31787index 1a53a4c..39edc66 100644
31788--- a/drivers/ide/opti621.c
31789+++ b/drivers/ide/opti621.c
31790@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31791 .set_pio_mode = opti621_set_pio_mode,
31792 };
31793
31794-static const struct ide_port_info opti621_chipset __devinitdata = {
31795+static const struct ide_port_info opti621_chipset __devinitconst = {
31796 .name = DRV_NAME,
31797 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31798 .port_ops = &opti621_port_ops,
31799diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31800index 9546fe2..2e5ceb6 100644
31801--- a/drivers/ide/pdc202xx_new.c
31802+++ b/drivers/ide/pdc202xx_new.c
31803@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31804 .udma_mask = udma, \
31805 }
31806
31807-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31808+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31809 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31810 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31811 };
31812diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31813index 3a35ec6..5634510 100644
31814--- a/drivers/ide/pdc202xx_old.c
31815+++ b/drivers/ide/pdc202xx_old.c
31816@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31817 .max_sectors = sectors, \
31818 }
31819
31820-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31821+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31822 { /* 0: PDC20246 */
31823 .name = DRV_NAME,
31824 .init_chipset = init_chipset_pdc202xx,
31825diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31826index 1892e81..fe0fd60 100644
31827--- a/drivers/ide/piix.c
31828+++ b/drivers/ide/piix.c
31829@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31830 .udma_mask = udma, \
31831 }
31832
31833-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31834+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31835 /* 0: MPIIX */
31836 { /*
31837 * MPIIX actually has only a single IDE channel mapped to
31838diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31839index a6414a8..c04173e 100644
31840--- a/drivers/ide/rz1000.c
31841+++ b/drivers/ide/rz1000.c
31842@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31843 }
31844 }
31845
31846-static const struct ide_port_info rz1000_chipset __devinitdata = {
31847+static const struct ide_port_info rz1000_chipset __devinitconst = {
31848 .name = DRV_NAME,
31849 .host_flags = IDE_HFLAG_NO_DMA,
31850 };
31851diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31852index 356b9b5..d4758eb 100644
31853--- a/drivers/ide/sc1200.c
31854+++ b/drivers/ide/sc1200.c
31855@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31856 .dma_sff_read_status = ide_dma_sff_read_status,
31857 };
31858
31859-static const struct ide_port_info sc1200_chipset __devinitdata = {
31860+static const struct ide_port_info sc1200_chipset __devinitconst = {
31861 .name = DRV_NAME,
31862 .port_ops = &sc1200_port_ops,
31863 .dma_ops = &sc1200_dma_ops,
31864diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31865index b7f5b0c..9701038 100644
31866--- a/drivers/ide/scc_pata.c
31867+++ b/drivers/ide/scc_pata.c
31868@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31869 .dma_sff_read_status = scc_dma_sff_read_status,
31870 };
31871
31872-static const struct ide_port_info scc_chipset __devinitdata = {
31873+static const struct ide_port_info scc_chipset __devinitconst = {
31874 .name = "sccIDE",
31875 .init_iops = init_iops_scc,
31876 .init_dma = scc_init_dma,
31877diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31878index 35fb8da..24d72ef 100644
31879--- a/drivers/ide/serverworks.c
31880+++ b/drivers/ide/serverworks.c
31881@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31882 .cable_detect = svwks_cable_detect,
31883 };
31884
31885-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31886+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31887 { /* 0: OSB4 */
31888 .name = DRV_NAME,
31889 .init_chipset = init_chipset_svwks,
31890diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31891index ddeda44..46f7e30 100644
31892--- a/drivers/ide/siimage.c
31893+++ b/drivers/ide/siimage.c
31894@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31895 .udma_mask = ATA_UDMA6, \
31896 }
31897
31898-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31899+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31900 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31901 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31902 };
31903diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31904index 4a00225..09e61b4 100644
31905--- a/drivers/ide/sis5513.c
31906+++ b/drivers/ide/sis5513.c
31907@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31908 .cable_detect = sis_cable_detect,
31909 };
31910
31911-static const struct ide_port_info sis5513_chipset __devinitdata = {
31912+static const struct ide_port_info sis5513_chipset __devinitconst = {
31913 .name = DRV_NAME,
31914 .init_chipset = init_chipset_sis5513,
31915 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31916diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31917index f21dc2a..d051cd2 100644
31918--- a/drivers/ide/sl82c105.c
31919+++ b/drivers/ide/sl82c105.c
31920@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31921 .dma_sff_read_status = ide_dma_sff_read_status,
31922 };
31923
31924-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31925+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31926 .name = DRV_NAME,
31927 .init_chipset = init_chipset_sl82c105,
31928 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31929diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31930index 864ffe0..863a5e9 100644
31931--- a/drivers/ide/slc90e66.c
31932+++ b/drivers/ide/slc90e66.c
31933@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31934 .cable_detect = slc90e66_cable_detect,
31935 };
31936
31937-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31938+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31939 .name = DRV_NAME,
31940 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31941 .port_ops = &slc90e66_port_ops,
31942diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31943index 4799d5c..1794678 100644
31944--- a/drivers/ide/tc86c001.c
31945+++ b/drivers/ide/tc86c001.c
31946@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31947 .dma_sff_read_status = ide_dma_sff_read_status,
31948 };
31949
31950-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31951+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31952 .name = DRV_NAME,
31953 .init_hwif = init_hwif_tc86c001,
31954 .port_ops = &tc86c001_port_ops,
31955diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31956index 281c914..55ce1b8 100644
31957--- a/drivers/ide/triflex.c
31958+++ b/drivers/ide/triflex.c
31959@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31960 .set_dma_mode = triflex_set_mode,
31961 };
31962
31963-static const struct ide_port_info triflex_device __devinitdata = {
31964+static const struct ide_port_info triflex_device __devinitconst = {
31965 .name = DRV_NAME,
31966 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31967 .port_ops = &triflex_port_ops,
31968diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31969index 4b42ca0..e494a98 100644
31970--- a/drivers/ide/trm290.c
31971+++ b/drivers/ide/trm290.c
31972@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31973 .dma_check = trm290_dma_check,
31974 };
31975
31976-static const struct ide_port_info trm290_chipset __devinitdata = {
31977+static const struct ide_port_info trm290_chipset __devinitconst = {
31978 .name = DRV_NAME,
31979 .init_hwif = init_hwif_trm290,
31980 .tp_ops = &trm290_tp_ops,
31981diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31982index f46f49c..eb77678 100644
31983--- a/drivers/ide/via82cxxx.c
31984+++ b/drivers/ide/via82cxxx.c
31985@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31986 .cable_detect = via82cxxx_cable_detect,
31987 };
31988
31989-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31990+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31991 .name = DRV_NAME,
31992 .init_chipset = init_chipset_via82cxxx,
31993 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31994diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31995index 73d4531..c90cd2d 100644
31996--- a/drivers/ieee802154/fakehard.c
31997+++ b/drivers/ieee802154/fakehard.c
31998@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31999 phy->transmit_power = 0xbf;
32000
32001 dev->netdev_ops = &fake_ops;
32002- dev->ml_priv = &fake_mlme;
32003+ dev->ml_priv = (void *)&fake_mlme;
32004
32005 priv = netdev_priv(dev);
32006 priv->phy = phy;
32007diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32008index c889aae..6cf5aa7 100644
32009--- a/drivers/infiniband/core/cm.c
32010+++ b/drivers/infiniband/core/cm.c
32011@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32012
32013 struct cm_counter_group {
32014 struct kobject obj;
32015- atomic_long_t counter[CM_ATTR_COUNT];
32016+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32017 };
32018
32019 struct cm_counter_attribute {
32020@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32021 struct ib_mad_send_buf *msg = NULL;
32022 int ret;
32023
32024- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32025+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32026 counter[CM_REQ_COUNTER]);
32027
32028 /* Quick state check to discard duplicate REQs. */
32029@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32030 if (!cm_id_priv)
32031 return;
32032
32033- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32034+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32035 counter[CM_REP_COUNTER]);
32036 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32037 if (ret)
32038@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32039 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32040 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32041 spin_unlock_irq(&cm_id_priv->lock);
32042- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32043+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32044 counter[CM_RTU_COUNTER]);
32045 goto out;
32046 }
32047@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32048 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32049 dreq_msg->local_comm_id);
32050 if (!cm_id_priv) {
32051- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32052+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32053 counter[CM_DREQ_COUNTER]);
32054 cm_issue_drep(work->port, work->mad_recv_wc);
32055 return -EINVAL;
32056@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32057 case IB_CM_MRA_REP_RCVD:
32058 break;
32059 case IB_CM_TIMEWAIT:
32060- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32061+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32062 counter[CM_DREQ_COUNTER]);
32063 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32064 goto unlock;
32065@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32066 cm_free_msg(msg);
32067 goto deref;
32068 case IB_CM_DREQ_RCVD:
32069- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32070+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32071 counter[CM_DREQ_COUNTER]);
32072 goto unlock;
32073 default:
32074@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32075 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32076 cm_id_priv->msg, timeout)) {
32077 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32078- atomic_long_inc(&work->port->
32079+ atomic_long_inc_unchecked(&work->port->
32080 counter_group[CM_RECV_DUPLICATES].
32081 counter[CM_MRA_COUNTER]);
32082 goto out;
32083@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32084 break;
32085 case IB_CM_MRA_REQ_RCVD:
32086 case IB_CM_MRA_REP_RCVD:
32087- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32088+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32089 counter[CM_MRA_COUNTER]);
32090 /* fall through */
32091 default:
32092@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32093 case IB_CM_LAP_IDLE:
32094 break;
32095 case IB_CM_MRA_LAP_SENT:
32096- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32097+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32098 counter[CM_LAP_COUNTER]);
32099 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32100 goto unlock;
32101@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32102 cm_free_msg(msg);
32103 goto deref;
32104 case IB_CM_LAP_RCVD:
32105- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32106+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32107 counter[CM_LAP_COUNTER]);
32108 goto unlock;
32109 default:
32110@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32111 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32112 if (cur_cm_id_priv) {
32113 spin_unlock_irq(&cm.lock);
32114- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32115+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32116 counter[CM_SIDR_REQ_COUNTER]);
32117 goto out; /* Duplicate message. */
32118 }
32119@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32120 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32121 msg->retries = 1;
32122
32123- atomic_long_add(1 + msg->retries,
32124+ atomic_long_add_unchecked(1 + msg->retries,
32125 &port->counter_group[CM_XMIT].counter[attr_index]);
32126 if (msg->retries)
32127- atomic_long_add(msg->retries,
32128+ atomic_long_add_unchecked(msg->retries,
32129 &port->counter_group[CM_XMIT_RETRIES].
32130 counter[attr_index]);
32131
32132@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32133 }
32134
32135 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32136- atomic_long_inc(&port->counter_group[CM_RECV].
32137+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32138 counter[attr_id - CM_ATTR_ID_OFFSET]);
32139
32140 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32141@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32142 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32143
32144 return sprintf(buf, "%ld\n",
32145- atomic_long_read(&group->counter[cm_attr->index]));
32146+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32147 }
32148
32149 static const struct sysfs_ops cm_counter_ops = {
32150diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32151index 176c8f9..2627b62 100644
32152--- a/drivers/infiniband/core/fmr_pool.c
32153+++ b/drivers/infiniband/core/fmr_pool.c
32154@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32155
32156 struct task_struct *thread;
32157
32158- atomic_t req_ser;
32159- atomic_t flush_ser;
32160+ atomic_unchecked_t req_ser;
32161+ atomic_unchecked_t flush_ser;
32162
32163 wait_queue_head_t force_wait;
32164 };
32165@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32166 struct ib_fmr_pool *pool = pool_ptr;
32167
32168 do {
32169- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32170+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32171 ib_fmr_batch_release(pool);
32172
32173- atomic_inc(&pool->flush_ser);
32174+ atomic_inc_unchecked(&pool->flush_ser);
32175 wake_up_interruptible(&pool->force_wait);
32176
32177 if (pool->flush_function)
32178@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32179 }
32180
32181 set_current_state(TASK_INTERRUPTIBLE);
32182- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32183+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32184 !kthread_should_stop())
32185 schedule();
32186 __set_current_state(TASK_RUNNING);
32187@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32188 pool->dirty_watermark = params->dirty_watermark;
32189 pool->dirty_len = 0;
32190 spin_lock_init(&pool->pool_lock);
32191- atomic_set(&pool->req_ser, 0);
32192- atomic_set(&pool->flush_ser, 0);
32193+ atomic_set_unchecked(&pool->req_ser, 0);
32194+ atomic_set_unchecked(&pool->flush_ser, 0);
32195 init_waitqueue_head(&pool->force_wait);
32196
32197 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32198@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32199 }
32200 spin_unlock_irq(&pool->pool_lock);
32201
32202- serial = atomic_inc_return(&pool->req_ser);
32203+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32204 wake_up_process(pool->thread);
32205
32206 if (wait_event_interruptible(pool->force_wait,
32207- atomic_read(&pool->flush_ser) - serial >= 0))
32208+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32209 return -EINTR;
32210
32211 return 0;
32212@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32213 } else {
32214 list_add_tail(&fmr->list, &pool->dirty_list);
32215 if (++pool->dirty_len >= pool->dirty_watermark) {
32216- atomic_inc(&pool->req_ser);
32217+ atomic_inc_unchecked(&pool->req_ser);
32218 wake_up_process(pool->thread);
32219 }
32220 }
32221diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32222index 40c8353..946b0e4 100644
32223--- a/drivers/infiniband/hw/cxgb4/mem.c
32224+++ b/drivers/infiniband/hw/cxgb4/mem.c
32225@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32226 int err;
32227 struct fw_ri_tpte tpt;
32228 u32 stag_idx;
32229- static atomic_t key;
32230+ static atomic_unchecked_t key;
32231
32232 if (c4iw_fatal_error(rdev))
32233 return -EIO;
32234@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32235 &rdev->resource.tpt_fifo_lock);
32236 if (!stag_idx)
32237 return -ENOMEM;
32238- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32239+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32240 }
32241 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32242 __func__, stag_state, type, pdid, stag_idx);
32243diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32244index 79b3dbc..96e5fcc 100644
32245--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32246+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32247@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32248 struct ib_atomic_eth *ateth;
32249 struct ipath_ack_entry *e;
32250 u64 vaddr;
32251- atomic64_t *maddr;
32252+ atomic64_unchecked_t *maddr;
32253 u64 sdata;
32254 u32 rkey;
32255 u8 next;
32256@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32257 IB_ACCESS_REMOTE_ATOMIC)))
32258 goto nack_acc_unlck;
32259 /* Perform atomic OP and save result. */
32260- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32261+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32262 sdata = be64_to_cpu(ateth->swap_data);
32263 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32264 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32265- (u64) atomic64_add_return(sdata, maddr) - sdata :
32266+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32267 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32268 be64_to_cpu(ateth->compare_data),
32269 sdata);
32270diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32271index 1f95bba..9530f87 100644
32272--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32273+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32274@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32275 unsigned long flags;
32276 struct ib_wc wc;
32277 u64 sdata;
32278- atomic64_t *maddr;
32279+ atomic64_unchecked_t *maddr;
32280 enum ib_wc_status send_status;
32281
32282 /*
32283@@ -382,11 +382,11 @@ again:
32284 IB_ACCESS_REMOTE_ATOMIC)))
32285 goto acc_err;
32286 /* Perform atomic OP and save result. */
32287- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32288+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32289 sdata = wqe->wr.wr.atomic.compare_add;
32290 *(u64 *) sqp->s_sge.sge.vaddr =
32291 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32292- (u64) atomic64_add_return(sdata, maddr) - sdata :
32293+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32294 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32295 sdata, wqe->wr.wr.atomic.swap);
32296 goto send_comp;
32297diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32298index 7140199..da60063 100644
32299--- a/drivers/infiniband/hw/nes/nes.c
32300+++ b/drivers/infiniband/hw/nes/nes.c
32301@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32302 LIST_HEAD(nes_adapter_list);
32303 static LIST_HEAD(nes_dev_list);
32304
32305-atomic_t qps_destroyed;
32306+atomic_unchecked_t qps_destroyed;
32307
32308 static unsigned int ee_flsh_adapter;
32309 static unsigned int sysfs_nonidx_addr;
32310@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32311 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32312 struct nes_adapter *nesadapter = nesdev->nesadapter;
32313
32314- atomic_inc(&qps_destroyed);
32315+ atomic_inc_unchecked(&qps_destroyed);
32316
32317 /* Free the control structures */
32318
32319diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32320index c438e46..ca30356 100644
32321--- a/drivers/infiniband/hw/nes/nes.h
32322+++ b/drivers/infiniband/hw/nes/nes.h
32323@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32324 extern unsigned int wqm_quanta;
32325 extern struct list_head nes_adapter_list;
32326
32327-extern atomic_t cm_connects;
32328-extern atomic_t cm_accepts;
32329-extern atomic_t cm_disconnects;
32330-extern atomic_t cm_closes;
32331-extern atomic_t cm_connecteds;
32332-extern atomic_t cm_connect_reqs;
32333-extern atomic_t cm_rejects;
32334-extern atomic_t mod_qp_timouts;
32335-extern atomic_t qps_created;
32336-extern atomic_t qps_destroyed;
32337-extern atomic_t sw_qps_destroyed;
32338+extern atomic_unchecked_t cm_connects;
32339+extern atomic_unchecked_t cm_accepts;
32340+extern atomic_unchecked_t cm_disconnects;
32341+extern atomic_unchecked_t cm_closes;
32342+extern atomic_unchecked_t cm_connecteds;
32343+extern atomic_unchecked_t cm_connect_reqs;
32344+extern atomic_unchecked_t cm_rejects;
32345+extern atomic_unchecked_t mod_qp_timouts;
32346+extern atomic_unchecked_t qps_created;
32347+extern atomic_unchecked_t qps_destroyed;
32348+extern atomic_unchecked_t sw_qps_destroyed;
32349 extern u32 mh_detected;
32350 extern u32 mh_pauses_sent;
32351 extern u32 cm_packets_sent;
32352@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32353 extern u32 cm_packets_received;
32354 extern u32 cm_packets_dropped;
32355 extern u32 cm_packets_retrans;
32356-extern atomic_t cm_listens_created;
32357-extern atomic_t cm_listens_destroyed;
32358+extern atomic_unchecked_t cm_listens_created;
32359+extern atomic_unchecked_t cm_listens_destroyed;
32360 extern u32 cm_backlog_drops;
32361-extern atomic_t cm_loopbacks;
32362-extern atomic_t cm_nodes_created;
32363-extern atomic_t cm_nodes_destroyed;
32364-extern atomic_t cm_accel_dropped_pkts;
32365-extern atomic_t cm_resets_recvd;
32366-extern atomic_t pau_qps_created;
32367-extern atomic_t pau_qps_destroyed;
32368+extern atomic_unchecked_t cm_loopbacks;
32369+extern atomic_unchecked_t cm_nodes_created;
32370+extern atomic_unchecked_t cm_nodes_destroyed;
32371+extern atomic_unchecked_t cm_accel_dropped_pkts;
32372+extern atomic_unchecked_t cm_resets_recvd;
32373+extern atomic_unchecked_t pau_qps_created;
32374+extern atomic_unchecked_t pau_qps_destroyed;
32375
32376 extern u32 int_mod_timer_init;
32377 extern u32 int_mod_cq_depth_256;
32378diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32379index 71edfbb..15b62ae 100644
32380--- a/drivers/infiniband/hw/nes/nes_cm.c
32381+++ b/drivers/infiniband/hw/nes/nes_cm.c
32382@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32383 u32 cm_packets_retrans;
32384 u32 cm_packets_created;
32385 u32 cm_packets_received;
32386-atomic_t cm_listens_created;
32387-atomic_t cm_listens_destroyed;
32388+atomic_unchecked_t cm_listens_created;
32389+atomic_unchecked_t cm_listens_destroyed;
32390 u32 cm_backlog_drops;
32391-atomic_t cm_loopbacks;
32392-atomic_t cm_nodes_created;
32393-atomic_t cm_nodes_destroyed;
32394-atomic_t cm_accel_dropped_pkts;
32395-atomic_t cm_resets_recvd;
32396+atomic_unchecked_t cm_loopbacks;
32397+atomic_unchecked_t cm_nodes_created;
32398+atomic_unchecked_t cm_nodes_destroyed;
32399+atomic_unchecked_t cm_accel_dropped_pkts;
32400+atomic_unchecked_t cm_resets_recvd;
32401
32402 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32403 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32404@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32405
32406 static struct nes_cm_core *g_cm_core;
32407
32408-atomic_t cm_connects;
32409-atomic_t cm_accepts;
32410-atomic_t cm_disconnects;
32411-atomic_t cm_closes;
32412-atomic_t cm_connecteds;
32413-atomic_t cm_connect_reqs;
32414-atomic_t cm_rejects;
32415+atomic_unchecked_t cm_connects;
32416+atomic_unchecked_t cm_accepts;
32417+atomic_unchecked_t cm_disconnects;
32418+atomic_unchecked_t cm_closes;
32419+atomic_unchecked_t cm_connecteds;
32420+atomic_unchecked_t cm_connect_reqs;
32421+atomic_unchecked_t cm_rejects;
32422
32423 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32424 {
32425@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32426 kfree(listener);
32427 listener = NULL;
32428 ret = 0;
32429- atomic_inc(&cm_listens_destroyed);
32430+ atomic_inc_unchecked(&cm_listens_destroyed);
32431 } else {
32432 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32433 }
32434@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32435 cm_node->rem_mac);
32436
32437 add_hte_node(cm_core, cm_node);
32438- atomic_inc(&cm_nodes_created);
32439+ atomic_inc_unchecked(&cm_nodes_created);
32440
32441 return cm_node;
32442 }
32443@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32444 }
32445
32446 atomic_dec(&cm_core->node_cnt);
32447- atomic_inc(&cm_nodes_destroyed);
32448+ atomic_inc_unchecked(&cm_nodes_destroyed);
32449 nesqp = cm_node->nesqp;
32450 if (nesqp) {
32451 nesqp->cm_node = NULL;
32452@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32453
32454 static void drop_packet(struct sk_buff *skb)
32455 {
32456- atomic_inc(&cm_accel_dropped_pkts);
32457+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32458 dev_kfree_skb_any(skb);
32459 }
32460
32461@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32462 {
32463
32464 int reset = 0; /* whether to send reset in case of err.. */
32465- atomic_inc(&cm_resets_recvd);
32466+ atomic_inc_unchecked(&cm_resets_recvd);
32467 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32468 " refcnt=%d\n", cm_node, cm_node->state,
32469 atomic_read(&cm_node->ref_count));
32470@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32471 rem_ref_cm_node(cm_node->cm_core, cm_node);
32472 return NULL;
32473 }
32474- atomic_inc(&cm_loopbacks);
32475+ atomic_inc_unchecked(&cm_loopbacks);
32476 loopbackremotenode->loopbackpartner = cm_node;
32477 loopbackremotenode->tcp_cntxt.rcv_wscale =
32478 NES_CM_DEFAULT_RCV_WND_SCALE;
32479@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32480 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32481 else {
32482 rem_ref_cm_node(cm_core, cm_node);
32483- atomic_inc(&cm_accel_dropped_pkts);
32484+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32485 dev_kfree_skb_any(skb);
32486 }
32487 break;
32488@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32489
32490 if ((cm_id) && (cm_id->event_handler)) {
32491 if (issue_disconn) {
32492- atomic_inc(&cm_disconnects);
32493+ atomic_inc_unchecked(&cm_disconnects);
32494 cm_event.event = IW_CM_EVENT_DISCONNECT;
32495 cm_event.status = disconn_status;
32496 cm_event.local_addr = cm_id->local_addr;
32497@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32498 }
32499
32500 if (issue_close) {
32501- atomic_inc(&cm_closes);
32502+ atomic_inc_unchecked(&cm_closes);
32503 nes_disconnect(nesqp, 1);
32504
32505 cm_id->provider_data = nesqp;
32506@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32507
32508 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32509 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32510- atomic_inc(&cm_accepts);
32511+ atomic_inc_unchecked(&cm_accepts);
32512
32513 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32514 netdev_refcnt_read(nesvnic->netdev));
32515@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32516 struct nes_cm_core *cm_core;
32517 u8 *start_buff;
32518
32519- atomic_inc(&cm_rejects);
32520+ atomic_inc_unchecked(&cm_rejects);
32521 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32522 loopback = cm_node->loopbackpartner;
32523 cm_core = cm_node->cm_core;
32524@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32525 ntohl(cm_id->local_addr.sin_addr.s_addr),
32526 ntohs(cm_id->local_addr.sin_port));
32527
32528- atomic_inc(&cm_connects);
32529+ atomic_inc_unchecked(&cm_connects);
32530 nesqp->active_conn = 1;
32531
32532 /* cache the cm_id in the qp */
32533@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32534 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32535 return err;
32536 }
32537- atomic_inc(&cm_listens_created);
32538+ atomic_inc_unchecked(&cm_listens_created);
32539 }
32540
32541 cm_id->add_ref(cm_id);
32542@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32543
32544 if (nesqp->destroyed)
32545 return;
32546- atomic_inc(&cm_connecteds);
32547+ atomic_inc_unchecked(&cm_connecteds);
32548 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32549 " local port 0x%04X. jiffies = %lu.\n",
32550 nesqp->hwqp.qp_id,
32551@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32552
32553 cm_id->add_ref(cm_id);
32554 ret = cm_id->event_handler(cm_id, &cm_event);
32555- atomic_inc(&cm_closes);
32556+ atomic_inc_unchecked(&cm_closes);
32557 cm_event.event = IW_CM_EVENT_CLOSE;
32558 cm_event.status = 0;
32559 cm_event.provider_data = cm_id->provider_data;
32560@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32561 return;
32562 cm_id = cm_node->cm_id;
32563
32564- atomic_inc(&cm_connect_reqs);
32565+ atomic_inc_unchecked(&cm_connect_reqs);
32566 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32567 cm_node, cm_id, jiffies);
32568
32569@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32570 return;
32571 cm_id = cm_node->cm_id;
32572
32573- atomic_inc(&cm_connect_reqs);
32574+ atomic_inc_unchecked(&cm_connect_reqs);
32575 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32576 cm_node, cm_id, jiffies);
32577
32578diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32579index 3ba7be3..c81f6ff 100644
32580--- a/drivers/infiniband/hw/nes/nes_mgt.c
32581+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32582@@ -40,8 +40,8 @@
32583 #include "nes.h"
32584 #include "nes_mgt.h"
32585
32586-atomic_t pau_qps_created;
32587-atomic_t pau_qps_destroyed;
32588+atomic_unchecked_t pau_qps_created;
32589+atomic_unchecked_t pau_qps_destroyed;
32590
32591 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32592 {
32593@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32594 {
32595 struct sk_buff *skb;
32596 unsigned long flags;
32597- atomic_inc(&pau_qps_destroyed);
32598+ atomic_inc_unchecked(&pau_qps_destroyed);
32599
32600 /* Free packets that have not yet been forwarded */
32601 /* Lock is acquired by skb_dequeue when removing the skb */
32602@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32603 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32604 skb_queue_head_init(&nesqp->pau_list);
32605 spin_lock_init(&nesqp->pau_lock);
32606- atomic_inc(&pau_qps_created);
32607+ atomic_inc_unchecked(&pau_qps_created);
32608 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32609 }
32610
32611diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32612index f3a3ecf..57d311d 100644
32613--- a/drivers/infiniband/hw/nes/nes_nic.c
32614+++ b/drivers/infiniband/hw/nes/nes_nic.c
32615@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32616 target_stat_values[++index] = mh_detected;
32617 target_stat_values[++index] = mh_pauses_sent;
32618 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32619- target_stat_values[++index] = atomic_read(&cm_connects);
32620- target_stat_values[++index] = atomic_read(&cm_accepts);
32621- target_stat_values[++index] = atomic_read(&cm_disconnects);
32622- target_stat_values[++index] = atomic_read(&cm_connecteds);
32623- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32624- target_stat_values[++index] = atomic_read(&cm_rejects);
32625- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32626- target_stat_values[++index] = atomic_read(&qps_created);
32627- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32628- target_stat_values[++index] = atomic_read(&qps_destroyed);
32629- target_stat_values[++index] = atomic_read(&cm_closes);
32630+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32631+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32632+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32633+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32634+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32635+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32636+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32637+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32638+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32639+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32640+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32641 target_stat_values[++index] = cm_packets_sent;
32642 target_stat_values[++index] = cm_packets_bounced;
32643 target_stat_values[++index] = cm_packets_created;
32644 target_stat_values[++index] = cm_packets_received;
32645 target_stat_values[++index] = cm_packets_dropped;
32646 target_stat_values[++index] = cm_packets_retrans;
32647- target_stat_values[++index] = atomic_read(&cm_listens_created);
32648- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32649+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32650+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32651 target_stat_values[++index] = cm_backlog_drops;
32652- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32653- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32654- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32655- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32656- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32657+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32658+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32659+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32660+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32661+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32662 target_stat_values[++index] = nesadapter->free_4kpbl;
32663 target_stat_values[++index] = nesadapter->free_256pbl;
32664 target_stat_values[++index] = int_mod_timer_init;
32665 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32666 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32667 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32668- target_stat_values[++index] = atomic_read(&pau_qps_created);
32669- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32670+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32671+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32672 }
32673
32674 /**
32675diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32676index 8b8812d..a5e1133 100644
32677--- a/drivers/infiniband/hw/nes/nes_verbs.c
32678+++ b/drivers/infiniband/hw/nes/nes_verbs.c
32679@@ -46,9 +46,9 @@
32680
32681 #include <rdma/ib_umem.h>
32682
32683-atomic_t mod_qp_timouts;
32684-atomic_t qps_created;
32685-atomic_t sw_qps_destroyed;
32686+atomic_unchecked_t mod_qp_timouts;
32687+atomic_unchecked_t qps_created;
32688+atomic_unchecked_t sw_qps_destroyed;
32689
32690 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32691
32692@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32693 if (init_attr->create_flags)
32694 return ERR_PTR(-EINVAL);
32695
32696- atomic_inc(&qps_created);
32697+ atomic_inc_unchecked(&qps_created);
32698 switch (init_attr->qp_type) {
32699 case IB_QPT_RC:
32700 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32701@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32702 struct iw_cm_event cm_event;
32703 int ret = 0;
32704
32705- atomic_inc(&sw_qps_destroyed);
32706+ atomic_inc_unchecked(&sw_qps_destroyed);
32707 nesqp->destroyed = 1;
32708
32709 /* Blow away the connection if it exists. */
32710diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32711index 6b811e3..f8acf88 100644
32712--- a/drivers/infiniband/hw/qib/qib.h
32713+++ b/drivers/infiniband/hw/qib/qib.h
32714@@ -51,6 +51,7 @@
32715 #include <linux/completion.h>
32716 #include <linux/kref.h>
32717 #include <linux/sched.h>
32718+#include <linux/slab.h>
32719
32720 #include "qib_common.h"
32721 #include "qib_verbs.h"
32722diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32723index da739d9..da1c7f4 100644
32724--- a/drivers/input/gameport/gameport.c
32725+++ b/drivers/input/gameport/gameport.c
32726@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32727 */
32728 static void gameport_init_port(struct gameport *gameport)
32729 {
32730- static atomic_t gameport_no = ATOMIC_INIT(0);
32731+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32732
32733 __module_get(THIS_MODULE);
32734
32735 mutex_init(&gameport->drv_mutex);
32736 device_initialize(&gameport->dev);
32737 dev_set_name(&gameport->dev, "gameport%lu",
32738- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32739+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32740 gameport->dev.bus = &gameport_bus;
32741 gameport->dev.release = gameport_release_port;
32742 if (gameport->parent)
32743diff --git a/drivers/input/input.c b/drivers/input/input.c
32744index 8921c61..f5cd63d 100644
32745--- a/drivers/input/input.c
32746+++ b/drivers/input/input.c
32747@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32748 */
32749 int input_register_device(struct input_dev *dev)
32750 {
32751- static atomic_t input_no = ATOMIC_INIT(0);
32752+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32753 struct input_handler *handler;
32754 const char *path;
32755 int error;
32756@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32757 dev->setkeycode = input_default_setkeycode;
32758
32759 dev_set_name(&dev->dev, "input%ld",
32760- (unsigned long) atomic_inc_return(&input_no) - 1);
32761+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32762
32763 error = device_add(&dev->dev);
32764 if (error)
32765diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32766index b8d8611..7a4a04b 100644
32767--- a/drivers/input/joystick/sidewinder.c
32768+++ b/drivers/input/joystick/sidewinder.c
32769@@ -30,6 +30,7 @@
32770 #include <linux/kernel.h>
32771 #include <linux/module.h>
32772 #include <linux/slab.h>
32773+#include <linux/sched.h>
32774 #include <linux/init.h>
32775 #include <linux/input.h>
32776 #include <linux/gameport.h>
32777diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32778index 42f7b25..09fcf46 100644
32779--- a/drivers/input/joystick/xpad.c
32780+++ b/drivers/input/joystick/xpad.c
32781@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32782
32783 static int xpad_led_probe(struct usb_xpad *xpad)
32784 {
32785- static atomic_t led_seq = ATOMIC_INIT(0);
32786+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32787 long led_no;
32788 struct xpad_led *led;
32789 struct led_classdev *led_cdev;
32790@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32791 if (!led)
32792 return -ENOMEM;
32793
32794- led_no = (long)atomic_inc_return(&led_seq) - 1;
32795+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32796
32797 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32798 led->xpad = xpad;
32799diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32800index 0110b5a..d3ad144 100644
32801--- a/drivers/input/mousedev.c
32802+++ b/drivers/input/mousedev.c
32803@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32804
32805 spin_unlock_irq(&client->packet_lock);
32806
32807- if (copy_to_user(buffer, data, count))
32808+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32809 return -EFAULT;
32810
32811 return count;
32812diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32813index d0f7533..fb8215b 100644
32814--- a/drivers/input/serio/serio.c
32815+++ b/drivers/input/serio/serio.c
32816@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32817 */
32818 static void serio_init_port(struct serio *serio)
32819 {
32820- static atomic_t serio_no = ATOMIC_INIT(0);
32821+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32822
32823 __module_get(THIS_MODULE);
32824
32825@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32826 mutex_init(&serio->drv_mutex);
32827 device_initialize(&serio->dev);
32828 dev_set_name(&serio->dev, "serio%ld",
32829- (long)atomic_inc_return(&serio_no) - 1);
32830+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32831 serio->dev.bus = &serio_bus;
32832 serio->dev.release = serio_release_port;
32833 serio->dev.groups = serio_device_attr_groups;
32834diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32835index b902794..fc7b85b 100644
32836--- a/drivers/isdn/capi/capi.c
32837+++ b/drivers/isdn/capi/capi.c
32838@@ -83,8 +83,8 @@ struct capiminor {
32839
32840 struct capi20_appl *ap;
32841 u32 ncci;
32842- atomic_t datahandle;
32843- atomic_t msgid;
32844+ atomic_unchecked_t datahandle;
32845+ atomic_unchecked_t msgid;
32846
32847 struct tty_port port;
32848 int ttyinstop;
32849@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32850 capimsg_setu16(s, 2, mp->ap->applid);
32851 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32852 capimsg_setu8 (s, 5, CAPI_RESP);
32853- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32854+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32855 capimsg_setu32(s, 8, mp->ncci);
32856 capimsg_setu16(s, 12, datahandle);
32857 }
32858@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32859 mp->outbytes -= len;
32860 spin_unlock_bh(&mp->outlock);
32861
32862- datahandle = atomic_inc_return(&mp->datahandle);
32863+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32864 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32865 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32866 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32867 capimsg_setu16(skb->data, 2, mp->ap->applid);
32868 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32869 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32870- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32871+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32872 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32873 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32874 capimsg_setu16(skb->data, 16, len); /* Data length */
32875diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32876index 821f7ac..28d4030 100644
32877--- a/drivers/isdn/hardware/avm/b1.c
32878+++ b/drivers/isdn/hardware/avm/b1.c
32879@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32880 }
32881 if (left) {
32882 if (t4file->user) {
32883- if (copy_from_user(buf, dp, left))
32884+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32885 return -EFAULT;
32886 } else {
32887 memcpy(buf, dp, left);
32888@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32889 }
32890 if (left) {
32891 if (config->user) {
32892- if (copy_from_user(buf, dp, left))
32893+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32894 return -EFAULT;
32895 } else {
32896 memcpy(buf, dp, left);
32897diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32898index dd6b53a..19d9ee6 100644
32899--- a/drivers/isdn/hardware/eicon/divasync.h
32900+++ b/drivers/isdn/hardware/eicon/divasync.h
32901@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32902 } diva_didd_add_adapter_t;
32903 typedef struct _diva_didd_remove_adapter {
32904 IDI_CALL p_request;
32905-} diva_didd_remove_adapter_t;
32906+} __no_const diva_didd_remove_adapter_t;
32907 typedef struct _diva_didd_read_adapter_array {
32908 void *buffer;
32909 dword length;
32910diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32911index d303e65..28bcb7b 100644
32912--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32913+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32914@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32915 typedef struct _diva_os_idi_adapter_interface {
32916 diva_init_card_proc_t cleanup_adapter_proc;
32917 diva_cmd_card_proc_t cmd_proc;
32918-} diva_os_idi_adapter_interface_t;
32919+} __no_const diva_os_idi_adapter_interface_t;
32920
32921 typedef struct _diva_os_xdi_adapter {
32922 struct list_head link;
32923diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32924index e74df7c..03a03ba 100644
32925--- a/drivers/isdn/icn/icn.c
32926+++ b/drivers/isdn/icn/icn.c
32927@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32928 if (count > len)
32929 count = len;
32930 if (user) {
32931- if (copy_from_user(msg, buf, count))
32932+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32933 return -EFAULT;
32934 } else
32935 memcpy(msg, buf, count);
32936diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32937index 8bc4915..4cc6a2e 100644
32938--- a/drivers/leds/leds-mc13783.c
32939+++ b/drivers/leds/leds-mc13783.c
32940@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32941 return -EINVAL;
32942 }
32943
32944- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32945+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32946 if (led == NULL) {
32947 dev_err(&pdev->dev, "failed to alloc memory\n");
32948 return -ENOMEM;
32949diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32950index b5fdcb7..5b6c59f 100644
32951--- a/drivers/lguest/core.c
32952+++ b/drivers/lguest/core.c
32953@@ -92,9 +92,17 @@ static __init int map_switcher(void)
32954 * it's worked so far. The end address needs +1 because __get_vm_area
32955 * allocates an extra guard page, so we need space for that.
32956 */
32957+
32958+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32959+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32960+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32961+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32962+#else
32963 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32964 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32965 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32966+#endif
32967+
32968 if (!switcher_vma) {
32969 err = -ENOMEM;
32970 printk("lguest: could not map switcher pages high\n");
32971@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32972 * Now the Switcher is mapped at the right address, we can't fail!
32973 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32974 */
32975- memcpy(switcher_vma->addr, start_switcher_text,
32976+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32977 end_switcher_text - start_switcher_text);
32978
32979 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32980diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32981index 39809035..ce25c5e 100644
32982--- a/drivers/lguest/x86/core.c
32983+++ b/drivers/lguest/x86/core.c
32984@@ -59,7 +59,7 @@ static struct {
32985 /* Offset from where switcher.S was compiled to where we've copied it */
32986 static unsigned long switcher_offset(void)
32987 {
32988- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32989+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32990 }
32991
32992 /* This cpu's struct lguest_pages. */
32993@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32994 * These copies are pretty cheap, so we do them unconditionally: */
32995 /* Save the current Host top-level page directory.
32996 */
32997+
32998+#ifdef CONFIG_PAX_PER_CPU_PGD
32999+ pages->state.host_cr3 = read_cr3();
33000+#else
33001 pages->state.host_cr3 = __pa(current->mm->pgd);
33002+#endif
33003+
33004 /*
33005 * Set up the Guest's page tables to see this CPU's pages (and no
33006 * other CPU's pages).
33007@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33008 * compiled-in switcher code and the high-mapped copy we just made.
33009 */
33010 for (i = 0; i < IDT_ENTRIES; i++)
33011- default_idt_entries[i] += switcher_offset();
33012+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33013
33014 /*
33015 * Set up the Switcher's per-cpu areas.
33016@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33017 * it will be undisturbed when we switch. To change %cs and jump we
33018 * need this structure to feed to Intel's "lcall" instruction.
33019 */
33020- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33021+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33022 lguest_entry.segment = LGUEST_CS;
33023
33024 /*
33025diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33026index 40634b0..4f5855e 100644
33027--- a/drivers/lguest/x86/switcher_32.S
33028+++ b/drivers/lguest/x86/switcher_32.S
33029@@ -87,6 +87,7 @@
33030 #include <asm/page.h>
33031 #include <asm/segment.h>
33032 #include <asm/lguest.h>
33033+#include <asm/processor-flags.h>
33034
33035 // We mark the start of the code to copy
33036 // It's placed in .text tho it's never run here
33037@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33038 // Changes type when we load it: damn Intel!
33039 // For after we switch over our page tables
33040 // That entry will be read-only: we'd crash.
33041+
33042+#ifdef CONFIG_PAX_KERNEXEC
33043+ mov %cr0, %edx
33044+ xor $X86_CR0_WP, %edx
33045+ mov %edx, %cr0
33046+#endif
33047+
33048 movl $(GDT_ENTRY_TSS*8), %edx
33049 ltr %dx
33050
33051@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33052 // Let's clear it again for our return.
33053 // The GDT descriptor of the Host
33054 // Points to the table after two "size" bytes
33055- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33056+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33057 // Clear "used" from type field (byte 5, bit 2)
33058- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33059+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33060+
33061+#ifdef CONFIG_PAX_KERNEXEC
33062+ mov %cr0, %eax
33063+ xor $X86_CR0_WP, %eax
33064+ mov %eax, %cr0
33065+#endif
33066
33067 // Once our page table's switched, the Guest is live!
33068 // The Host fades as we run this final step.
33069@@ -295,13 +309,12 @@ deliver_to_host:
33070 // I consulted gcc, and it gave
33071 // These instructions, which I gladly credit:
33072 leal (%edx,%ebx,8), %eax
33073- movzwl (%eax),%edx
33074- movl 4(%eax), %eax
33075- xorw %ax, %ax
33076- orl %eax, %edx
33077+ movl 4(%eax), %edx
33078+ movw (%eax), %dx
33079 // Now the address of the handler's in %edx
33080 // We call it now: its "iret" drops us home.
33081- jmp *%edx
33082+ ljmp $__KERNEL_CS, $1f
33083+1: jmp *%edx
33084
33085 // Every interrupt can come to us here
33086 // But we must truly tell each apart.
33087diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33088index 20e5c2c..9e849a9 100644
33089--- a/drivers/macintosh/macio_asic.c
33090+++ b/drivers/macintosh/macio_asic.c
33091@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33092 * MacIO is matched against any Apple ID, it's probe() function
33093 * will then decide wether it applies or not
33094 */
33095-static const struct pci_device_id __devinitdata pci_ids [] = { {
33096+static const struct pci_device_id __devinitconst pci_ids [] = { {
33097 .vendor = PCI_VENDOR_ID_APPLE,
33098 .device = PCI_ANY_ID,
33099 .subvendor = PCI_ANY_ID,
33100diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33101index 17e2b47..bcbeec4 100644
33102--- a/drivers/md/bitmap.c
33103+++ b/drivers/md/bitmap.c
33104@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33105 chunk_kb ? "KB" : "B");
33106 if (bitmap->file) {
33107 seq_printf(seq, ", file: ");
33108- seq_path(seq, &bitmap->file->f_path, " \t\n");
33109+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33110 }
33111
33112 seq_printf(seq, "\n");
33113diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33114index a1a3e6d..1918bfc 100644
33115--- a/drivers/md/dm-ioctl.c
33116+++ b/drivers/md/dm-ioctl.c
33117@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33118 cmd == DM_LIST_VERSIONS_CMD)
33119 return 0;
33120
33121- if ((cmd == DM_DEV_CREATE_CMD)) {
33122+ if (cmd == DM_DEV_CREATE_CMD) {
33123 if (!*param->name) {
33124 DMWARN("name not supplied when creating device");
33125 return -EINVAL;
33126diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33127index d039de8..0cf5b87 100644
33128--- a/drivers/md/dm-raid1.c
33129+++ b/drivers/md/dm-raid1.c
33130@@ -40,7 +40,7 @@ enum dm_raid1_error {
33131
33132 struct mirror {
33133 struct mirror_set *ms;
33134- atomic_t error_count;
33135+ atomic_unchecked_t error_count;
33136 unsigned long error_type;
33137 struct dm_dev *dev;
33138 sector_t offset;
33139@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33140 struct mirror *m;
33141
33142 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33143- if (!atomic_read(&m->error_count))
33144+ if (!atomic_read_unchecked(&m->error_count))
33145 return m;
33146
33147 return NULL;
33148@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33149 * simple way to tell if a device has encountered
33150 * errors.
33151 */
33152- atomic_inc(&m->error_count);
33153+ atomic_inc_unchecked(&m->error_count);
33154
33155 if (test_and_set_bit(error_type, &m->error_type))
33156 return;
33157@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33158 struct mirror *m = get_default_mirror(ms);
33159
33160 do {
33161- if (likely(!atomic_read(&m->error_count)))
33162+ if (likely(!atomic_read_unchecked(&m->error_count)))
33163 return m;
33164
33165 if (m-- == ms->mirror)
33166@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33167 {
33168 struct mirror *default_mirror = get_default_mirror(m->ms);
33169
33170- return !atomic_read(&default_mirror->error_count);
33171+ return !atomic_read_unchecked(&default_mirror->error_count);
33172 }
33173
33174 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33175@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33176 */
33177 if (likely(region_in_sync(ms, region, 1)))
33178 m = choose_mirror(ms, bio->bi_sector);
33179- else if (m && atomic_read(&m->error_count))
33180+ else if (m && atomic_read_unchecked(&m->error_count))
33181 m = NULL;
33182
33183 if (likely(m))
33184@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33185 }
33186
33187 ms->mirror[mirror].ms = ms;
33188- atomic_set(&(ms->mirror[mirror].error_count), 0);
33189+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33190 ms->mirror[mirror].error_type = 0;
33191 ms->mirror[mirror].offset = offset;
33192
33193@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33194 */
33195 static char device_status_char(struct mirror *m)
33196 {
33197- if (!atomic_read(&(m->error_count)))
33198+ if (!atomic_read_unchecked(&(m->error_count)))
33199 return 'A';
33200
33201 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33202diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33203index 35c94ff..20d4c17 100644
33204--- a/drivers/md/dm-stripe.c
33205+++ b/drivers/md/dm-stripe.c
33206@@ -20,7 +20,7 @@ struct stripe {
33207 struct dm_dev *dev;
33208 sector_t physical_start;
33209
33210- atomic_t error_count;
33211+ atomic_unchecked_t error_count;
33212 };
33213
33214 struct stripe_c {
33215@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33216 kfree(sc);
33217 return r;
33218 }
33219- atomic_set(&(sc->stripe[i].error_count), 0);
33220+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33221 }
33222
33223 ti->private = sc;
33224@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33225 DMEMIT("%d ", sc->stripes);
33226 for (i = 0; i < sc->stripes; i++) {
33227 DMEMIT("%s ", sc->stripe[i].dev->name);
33228- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33229+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33230 'D' : 'A';
33231 }
33232 buffer[i] = '\0';
33233@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33234 */
33235 for (i = 0; i < sc->stripes; i++)
33236 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33237- atomic_inc(&(sc->stripe[i].error_count));
33238- if (atomic_read(&(sc->stripe[i].error_count)) <
33239+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33240+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33241 DM_IO_ERROR_THRESHOLD)
33242 schedule_work(&sc->trigger_event);
33243 }
33244diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33245index 2e227fb..44ead1f 100644
33246--- a/drivers/md/dm-table.c
33247+++ b/drivers/md/dm-table.c
33248@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33249 if (!dev_size)
33250 return 0;
33251
33252- if ((start >= dev_size) || (start + len > dev_size)) {
33253+ if ((start >= dev_size) || (len > dev_size - start)) {
33254 DMWARN("%s: %s too small for target: "
33255 "start=%llu, len=%llu, dev_size=%llu",
33256 dm_device_name(ti->table->md), bdevname(bdev, b),
33257diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33258index 737d388..811ad5a 100644
33259--- a/drivers/md/dm-thin-metadata.c
33260+++ b/drivers/md/dm-thin-metadata.c
33261@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33262
33263 pmd->info.tm = tm;
33264 pmd->info.levels = 2;
33265- pmd->info.value_type.context = pmd->data_sm;
33266+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33267 pmd->info.value_type.size = sizeof(__le64);
33268 pmd->info.value_type.inc = data_block_inc;
33269 pmd->info.value_type.dec = data_block_dec;
33270@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33271
33272 pmd->bl_info.tm = tm;
33273 pmd->bl_info.levels = 1;
33274- pmd->bl_info.value_type.context = pmd->data_sm;
33275+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33276 pmd->bl_info.value_type.size = sizeof(__le64);
33277 pmd->bl_info.value_type.inc = data_block_inc;
33278 pmd->bl_info.value_type.dec = data_block_dec;
33279diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33280index e24143c..ce2f21a1 100644
33281--- a/drivers/md/dm.c
33282+++ b/drivers/md/dm.c
33283@@ -176,9 +176,9 @@ struct mapped_device {
33284 /*
33285 * Event handling.
33286 */
33287- atomic_t event_nr;
33288+ atomic_unchecked_t event_nr;
33289 wait_queue_head_t eventq;
33290- atomic_t uevent_seq;
33291+ atomic_unchecked_t uevent_seq;
33292 struct list_head uevent_list;
33293 spinlock_t uevent_lock; /* Protect access to uevent_list */
33294
33295@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33296 rwlock_init(&md->map_lock);
33297 atomic_set(&md->holders, 1);
33298 atomic_set(&md->open_count, 0);
33299- atomic_set(&md->event_nr, 0);
33300- atomic_set(&md->uevent_seq, 0);
33301+ atomic_set_unchecked(&md->event_nr, 0);
33302+ atomic_set_unchecked(&md->uevent_seq, 0);
33303 INIT_LIST_HEAD(&md->uevent_list);
33304 spin_lock_init(&md->uevent_lock);
33305
33306@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33307
33308 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33309
33310- atomic_inc(&md->event_nr);
33311+ atomic_inc_unchecked(&md->event_nr);
33312 wake_up(&md->eventq);
33313 }
33314
33315@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33316
33317 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33318 {
33319- return atomic_add_return(1, &md->uevent_seq);
33320+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33321 }
33322
33323 uint32_t dm_get_event_nr(struct mapped_device *md)
33324 {
33325- return atomic_read(&md->event_nr);
33326+ return atomic_read_unchecked(&md->event_nr);
33327 }
33328
33329 int dm_wait_event(struct mapped_device *md, int event_nr)
33330 {
33331 return wait_event_interruptible(md->eventq,
33332- (event_nr != atomic_read(&md->event_nr)));
33333+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33334 }
33335
33336 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33337diff --git a/drivers/md/md.c b/drivers/md/md.c
33338index 2b30ffd..362b519 100644
33339--- a/drivers/md/md.c
33340+++ b/drivers/md/md.c
33341@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33342 * start build, activate spare
33343 */
33344 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33345-static atomic_t md_event_count;
33346+static atomic_unchecked_t md_event_count;
33347 void md_new_event(struct mddev *mddev)
33348 {
33349- atomic_inc(&md_event_count);
33350+ atomic_inc_unchecked(&md_event_count);
33351 wake_up(&md_event_waiters);
33352 }
33353 EXPORT_SYMBOL_GPL(md_new_event);
33354@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33355 */
33356 static void md_new_event_inintr(struct mddev *mddev)
33357 {
33358- atomic_inc(&md_event_count);
33359+ atomic_inc_unchecked(&md_event_count);
33360 wake_up(&md_event_waiters);
33361 }
33362
33363@@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33364
33365 rdev->preferred_minor = 0xffff;
33366 rdev->data_offset = le64_to_cpu(sb->data_offset);
33367- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33368+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33369
33370 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33371 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33372@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33373 else
33374 sb->resync_offset = cpu_to_le64(0);
33375
33376- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33377+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33378
33379 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33380 sb->size = cpu_to_le64(mddev->dev_sectors);
33381@@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33382 static ssize_t
33383 errors_show(struct md_rdev *rdev, char *page)
33384 {
33385- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33386+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33387 }
33388
33389 static ssize_t
33390@@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33391 char *e;
33392 unsigned long n = simple_strtoul(buf, &e, 10);
33393 if (*buf && (*e == 0 || *e == '\n')) {
33394- atomic_set(&rdev->corrected_errors, n);
33395+ atomic_set_unchecked(&rdev->corrected_errors, n);
33396 return len;
33397 }
33398 return -EINVAL;
33399@@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33400 rdev->sb_loaded = 0;
33401 rdev->bb_page = NULL;
33402 atomic_set(&rdev->nr_pending, 0);
33403- atomic_set(&rdev->read_errors, 0);
33404- atomic_set(&rdev->corrected_errors, 0);
33405+ atomic_set_unchecked(&rdev->read_errors, 0);
33406+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33407
33408 INIT_LIST_HEAD(&rdev->same_set);
33409 init_waitqueue_head(&rdev->blocked_wait);
33410@@ -3744,8 +3744,8 @@ array_state_show(struct mddev *mddev, char *page)
33411 return sprintf(page, "%s\n", array_states[st]);
33412 }
33413
33414-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
33415-static int md_set_readonly(struct mddev * mddev, int is_open);
33416+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
33417+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
33418 static int do_md_run(struct mddev * mddev);
33419 static int restart_array(struct mddev *mddev);
33420
33421@@ -3761,14 +3761,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33422 /* stopping an active array */
33423 if (atomic_read(&mddev->openers) > 0)
33424 return -EBUSY;
33425- err = do_md_stop(mddev, 0, 0);
33426+ err = do_md_stop(mddev, 0, NULL);
33427 break;
33428 case inactive:
33429 /* stopping an active array */
33430 if (mddev->pers) {
33431 if (atomic_read(&mddev->openers) > 0)
33432 return -EBUSY;
33433- err = do_md_stop(mddev, 2, 0);
33434+ err = do_md_stop(mddev, 2, NULL);
33435 } else
33436 err = 0; /* already inactive */
33437 break;
33438@@ -3776,7 +3776,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33439 break; /* not supported yet */
33440 case readonly:
33441 if (mddev->pers)
33442- err = md_set_readonly(mddev, 0);
33443+ err = md_set_readonly(mddev, NULL);
33444 else {
33445 mddev->ro = 1;
33446 set_disk_ro(mddev->gendisk, 1);
33447@@ -3786,7 +3786,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33448 case read_auto:
33449 if (mddev->pers) {
33450 if (mddev->ro == 0)
33451- err = md_set_readonly(mddev, 0);
33452+ err = md_set_readonly(mddev, NULL);
33453 else if (mddev->ro == 1)
33454 err = restart_array(mddev);
33455 if (err == 0) {
33456@@ -5124,15 +5124,17 @@ void md_stop(struct mddev *mddev)
33457 }
33458 EXPORT_SYMBOL_GPL(md_stop);
33459
33460-static int md_set_readonly(struct mddev *mddev, int is_open)
33461+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
33462 {
33463 int err = 0;
33464 mutex_lock(&mddev->open_mutex);
33465- if (atomic_read(&mddev->openers) > is_open) {
33466+ if (atomic_read(&mddev->openers) > !!bdev) {
33467 printk("md: %s still in use.\n",mdname(mddev));
33468 err = -EBUSY;
33469 goto out;
33470 }
33471+ if (bdev)
33472+ sync_blockdev(bdev);
33473 if (mddev->pers) {
33474 __md_stop_writes(mddev);
33475
33476@@ -5154,18 +5156,26 @@ out:
33477 * 0 - completely stop and dis-assemble array
33478 * 2 - stop but do not disassemble array
33479 */
33480-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
33481+static int do_md_stop(struct mddev * mddev, int mode,
33482+ struct block_device *bdev)
33483 {
33484 struct gendisk *disk = mddev->gendisk;
33485 struct md_rdev *rdev;
33486
33487 mutex_lock(&mddev->open_mutex);
33488- if (atomic_read(&mddev->openers) > is_open ||
33489+ if (atomic_read(&mddev->openers) > !!bdev ||
33490 mddev->sysfs_active) {
33491 printk("md: %s still in use.\n",mdname(mddev));
33492 mutex_unlock(&mddev->open_mutex);
33493 return -EBUSY;
33494 }
33495+ if (bdev)
33496+ /* It is possible IO was issued on some other
33497+ * open file which was closed before we took ->open_mutex.
33498+ * As that was not the last close __blkdev_put will not
33499+ * have called sync_blockdev, so we must.
33500+ */
33501+ sync_blockdev(bdev);
33502
33503 if (mddev->pers) {
33504 if (mddev->ro)
33505@@ -5239,7 +5249,7 @@ static void autorun_array(struct mddev *mddev)
33506 err = do_md_run(mddev);
33507 if (err) {
33508 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
33509- do_md_stop(mddev, 0, 0);
33510+ do_md_stop(mddev, 0, NULL);
33511 }
33512 }
33513
33514@@ -6237,11 +6247,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
33515 goto done_unlock;
33516
33517 case STOP_ARRAY:
33518- err = do_md_stop(mddev, 0, 1);
33519+ err = do_md_stop(mddev, 0, bdev);
33520 goto done_unlock;
33521
33522 case STOP_ARRAY_RO:
33523- err = md_set_readonly(mddev, 1);
33524+ err = md_set_readonly(mddev, bdev);
33525 goto done_unlock;
33526
33527 case BLKROSET:
33528@@ -6738,7 +6748,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33529
33530 spin_unlock(&pers_lock);
33531 seq_printf(seq, "\n");
33532- seq->poll_event = atomic_read(&md_event_count);
33533+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33534 return 0;
33535 }
33536 if (v == (void*)2) {
33537@@ -6841,7 +6851,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33538 return error;
33539
33540 seq = file->private_data;
33541- seq->poll_event = atomic_read(&md_event_count);
33542+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33543 return error;
33544 }
33545
33546@@ -6855,7 +6865,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33547 /* always allow read */
33548 mask = POLLIN | POLLRDNORM;
33549
33550- if (seq->poll_event != atomic_read(&md_event_count))
33551+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33552 mask |= POLLERR | POLLPRI;
33553 return mask;
33554 }
33555@@ -6899,7 +6909,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33556 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33557 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33558 (int)part_stat_read(&disk->part0, sectors[1]) -
33559- atomic_read(&disk->sync_io);
33560+ atomic_read_unchecked(&disk->sync_io);
33561 /* sync IO will cause sync_io to increase before the disk_stats
33562 * as sync_io is counted when a request starts, and
33563 * disk_stats is counted when it completes.
33564diff --git a/drivers/md/md.h b/drivers/md/md.h
33565index 1c2063c..9639970 100644
33566--- a/drivers/md/md.h
33567+++ b/drivers/md/md.h
33568@@ -93,13 +93,13 @@ struct md_rdev {
33569 * only maintained for arrays that
33570 * support hot removal
33571 */
33572- atomic_t read_errors; /* number of consecutive read errors that
33573+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33574 * we have tried to ignore.
33575 */
33576 struct timespec last_read_error; /* monotonic time since our
33577 * last read error
33578 */
33579- atomic_t corrected_errors; /* number of corrected read errors,
33580+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33581 * for reporting to userspace and storing
33582 * in superblock.
33583 */
33584@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33585
33586 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33587 {
33588- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33589+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33590 }
33591
33592 struct md_personality
33593diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33594index fc90c11..c8cd9a9 100644
33595--- a/drivers/md/persistent-data/dm-space-map-checker.c
33596+++ b/drivers/md/persistent-data/dm-space-map-checker.c
33597@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33598 /*----------------------------------------------------------------*/
33599
33600 struct sm_checker {
33601- struct dm_space_map sm;
33602+ dm_space_map_no_const sm;
33603
33604 struct count_array old_counts;
33605 struct count_array counts;
33606diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33607index 3d0ed53..35dc592 100644
33608--- a/drivers/md/persistent-data/dm-space-map-disk.c
33609+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33610@@ -23,7 +23,7 @@
33611 * Space map interface.
33612 */
33613 struct sm_disk {
33614- struct dm_space_map sm;
33615+ dm_space_map_no_const sm;
33616
33617 struct ll_disk ll;
33618 struct ll_disk old_ll;
33619diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33620index e89ae5e..062e4c2 100644
33621--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33622+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33623@@ -43,7 +43,7 @@ struct block_op {
33624 };
33625
33626 struct sm_metadata {
33627- struct dm_space_map sm;
33628+ dm_space_map_no_const sm;
33629
33630 struct ll_disk ll;
33631 struct ll_disk old_ll;
33632diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33633index 1cbfc6b..56e1dbb 100644
33634--- a/drivers/md/persistent-data/dm-space-map.h
33635+++ b/drivers/md/persistent-data/dm-space-map.h
33636@@ -60,6 +60,7 @@ struct dm_space_map {
33637 int (*root_size)(struct dm_space_map *sm, size_t *result);
33638 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33639 };
33640+typedef struct dm_space_map __no_const dm_space_map_no_const;
33641
33642 /*----------------------------------------------------------------*/
33643
33644diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33645index d1f74ab..d1b24fd 100644
33646--- a/drivers/md/raid1.c
33647+++ b/drivers/md/raid1.c
33648@@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33649 if (r1_sync_page_io(rdev, sect, s,
33650 bio->bi_io_vec[idx].bv_page,
33651 READ) != 0)
33652- atomic_add(s, &rdev->corrected_errors);
33653+ atomic_add_unchecked(s, &rdev->corrected_errors);
33654 }
33655 sectors -= s;
33656 sect += s;
33657@@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33658 test_bit(In_sync, &rdev->flags)) {
33659 if (r1_sync_page_io(rdev, sect, s,
33660 conf->tmppage, READ)) {
33661- atomic_add(s, &rdev->corrected_errors);
33662+ atomic_add_unchecked(s, &rdev->corrected_errors);
33663 printk(KERN_INFO
33664 "md/raid1:%s: read error corrected "
33665 "(%d sectors at %llu on %s)\n",
33666diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33667index a954c95..6e7a21c 100644
33668--- a/drivers/md/raid10.c
33669+++ b/drivers/md/raid10.c
33670@@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33671 /* The write handler will notice the lack of
33672 * R10BIO_Uptodate and record any errors etc
33673 */
33674- atomic_add(r10_bio->sectors,
33675+ atomic_add_unchecked(r10_bio->sectors,
33676 &conf->mirrors[d].rdev->corrected_errors);
33677
33678 /* for reconstruct, we always reschedule after a read.
33679@@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33680 {
33681 struct timespec cur_time_mon;
33682 unsigned long hours_since_last;
33683- unsigned int read_errors = atomic_read(&rdev->read_errors);
33684+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33685
33686 ktime_get_ts(&cur_time_mon);
33687
33688@@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33689 * overflowing the shift of read_errors by hours_since_last.
33690 */
33691 if (hours_since_last >= 8 * sizeof(read_errors))
33692- atomic_set(&rdev->read_errors, 0);
33693+ atomic_set_unchecked(&rdev->read_errors, 0);
33694 else
33695- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33696+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33697 }
33698
33699 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33700@@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33701 return;
33702
33703 check_decay_read_errors(mddev, rdev);
33704- atomic_inc(&rdev->read_errors);
33705- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33706+ atomic_inc_unchecked(&rdev->read_errors);
33707+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33708 char b[BDEVNAME_SIZE];
33709 bdevname(rdev->bdev, b);
33710
33711@@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33712 "md/raid10:%s: %s: Raid device exceeded "
33713 "read_error threshold [cur %d:max %d]\n",
33714 mdname(mddev), b,
33715- atomic_read(&rdev->read_errors), max_read_errors);
33716+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33717 printk(KERN_NOTICE
33718 "md/raid10:%s: %s: Failing raid device\n",
33719 mdname(mddev), b);
33720@@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33721 (unsigned long long)(
33722 sect + rdev->data_offset),
33723 bdevname(rdev->bdev, b));
33724- atomic_add(s, &rdev->corrected_errors);
33725+ atomic_add_unchecked(s, &rdev->corrected_errors);
33726 }
33727
33728 rdev_dec_pending(rdev, mddev);
33729diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33730index 73a5800..2b0e3b1 100644
33731--- a/drivers/md/raid5.c
33732+++ b/drivers/md/raid5.c
33733@@ -1694,18 +1694,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33734 (unsigned long long)(sh->sector
33735 + rdev->data_offset),
33736 bdevname(rdev->bdev, b));
33737- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33738+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33739 clear_bit(R5_ReadError, &sh->dev[i].flags);
33740 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33741 }
33742- if (atomic_read(&rdev->read_errors))
33743- atomic_set(&rdev->read_errors, 0);
33744+ if (atomic_read_unchecked(&rdev->read_errors))
33745+ atomic_set_unchecked(&rdev->read_errors, 0);
33746 } else {
33747 const char *bdn = bdevname(rdev->bdev, b);
33748 int retry = 0;
33749
33750 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33751- atomic_inc(&rdev->read_errors);
33752+ atomic_inc_unchecked(&rdev->read_errors);
33753 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33754 printk_ratelimited(
33755 KERN_WARNING
33756@@ -1734,7 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33757 (unsigned long long)(sh->sector
33758 + rdev->data_offset),
33759 bdn);
33760- else if (atomic_read(&rdev->read_errors)
33761+ else if (atomic_read_unchecked(&rdev->read_errors)
33762 > conf->max_nr_stripes)
33763 printk(KERN_WARNING
33764 "md/raid:%s: Too many read errors, failing device %s.\n",
33765diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33766index d88c4aa..17c80b1 100644
33767--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33768+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33769@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33770 .subvendor = _subvend, .subdevice = _subdev, \
33771 .driver_data = (unsigned long)&_driverdata }
33772
33773-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33774+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33775 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33776 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33777 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33778diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33779index a7d876f..8c21b61 100644
33780--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33781+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33782@@ -73,7 +73,7 @@ struct dvb_demux_feed {
33783 union {
33784 dmx_ts_cb ts;
33785 dmx_section_cb sec;
33786- } cb;
33787+ } __no_const cb;
33788
33789 struct dvb_demux *demux;
33790 void *priv;
33791diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33792index 39eab73..60033e7 100644
33793--- a/drivers/media/dvb/dvb-core/dvbdev.c
33794+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33795@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33796 const struct dvb_device *template, void *priv, int type)
33797 {
33798 struct dvb_device *dvbdev;
33799- struct file_operations *dvbdevfops;
33800+ file_operations_no_const *dvbdevfops;
33801 struct device *clsdev;
33802 int minor;
33803 int id;
33804diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33805index 3940bb0..fb3952a 100644
33806--- a/drivers/media/dvb/dvb-usb/cxusb.c
33807+++ b/drivers/media/dvb/dvb-usb/cxusb.c
33808@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33809
33810 struct dib0700_adapter_state {
33811 int (*set_param_save) (struct dvb_frontend *);
33812-};
33813+} __no_const;
33814
33815 static int dib7070_set_param_override(struct dvb_frontend *fe)
33816 {
33817diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33818index 451c5a7..649f711 100644
33819--- a/drivers/media/dvb/dvb-usb/dw2102.c
33820+++ b/drivers/media/dvb/dvb-usb/dw2102.c
33821@@ -95,7 +95,7 @@ struct su3000_state {
33822
33823 struct s6x0_state {
33824 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33825-};
33826+} __no_const;
33827
33828 /* debug */
33829 static int dvb_usb_dw2102_debug;
33830diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33831index 404f63a..4796533 100644
33832--- a/drivers/media/dvb/frontends/dib3000.h
33833+++ b/drivers/media/dvb/frontends/dib3000.h
33834@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33835 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33836 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33837 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33838-};
33839+} __no_const;
33840
33841 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33842 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33843diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33844index 7539a5d..06531a6 100644
33845--- a/drivers/media/dvb/ngene/ngene-cards.c
33846+++ b/drivers/media/dvb/ngene/ngene-cards.c
33847@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33848
33849 /****************************************************************************/
33850
33851-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33852+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33853 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33854 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33855 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33856diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33857index 16a089f..1661b11 100644
33858--- a/drivers/media/radio/radio-cadet.c
33859+++ b/drivers/media/radio/radio-cadet.c
33860@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33861 unsigned char readbuf[RDS_BUFFER];
33862 int i = 0;
33863
33864+ if (count > RDS_BUFFER)
33865+ return -EFAULT;
33866 mutex_lock(&dev->lock);
33867 if (dev->rdsstat == 0) {
33868 dev->rdsstat = 1;
33869@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33870 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33871 mutex_unlock(&dev->lock);
33872
33873- if (copy_to_user(data, readbuf, i))
33874+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33875 return -EFAULT;
33876 return i;
33877 }
33878diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33879index 9cde353..8c6a1c3 100644
33880--- a/drivers/media/video/au0828/au0828.h
33881+++ b/drivers/media/video/au0828/au0828.h
33882@@ -191,7 +191,7 @@ struct au0828_dev {
33883
33884 /* I2C */
33885 struct i2c_adapter i2c_adap;
33886- struct i2c_algorithm i2c_algo;
33887+ i2c_algorithm_no_const i2c_algo;
33888 struct i2c_client i2c_client;
33889 u32 i2c_rc;
33890
33891diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
33892index 7930ca5..235bf7d 100644
33893--- a/drivers/media/video/cx25821/cx25821-core.c
33894+++ b/drivers/media/video/cx25821/cx25821-core.c
33895@@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
33896 list_add_tail(&dev->devlist, &cx25821_devlist);
33897 mutex_unlock(&cx25821_devlist_mutex);
33898
33899- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
33900- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
33901-
33902 if (dev->pci->device != 0x8210) {
33903 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
33904 __func__, dev->pci->device);
33905diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
33906index b9aa801..029f293 100644
33907--- a/drivers/media/video/cx25821/cx25821.h
33908+++ b/drivers/media/video/cx25821/cx25821.h
33909@@ -187,7 +187,7 @@ enum port {
33910 };
33911
33912 struct cx25821_board {
33913- char *name;
33914+ const char *name;
33915 enum port porta;
33916 enum port portb;
33917 enum port portc;
33918diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33919index 04bf662..e0ac026 100644
33920--- a/drivers/media/video/cx88/cx88-alsa.c
33921+++ b/drivers/media/video/cx88/cx88-alsa.c
33922@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33923 * Only boards with eeprom and byte 1 at eeprom=1 have it
33924 */
33925
33926-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33927+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33928 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33929 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33930 {0, }
33931diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33932index 88cf9d9..bbc4b2c 100644
33933--- a/drivers/media/video/omap/omap_vout.c
33934+++ b/drivers/media/video/omap/omap_vout.c
33935@@ -64,7 +64,6 @@ enum omap_vout_channels {
33936 OMAP_VIDEO2,
33937 };
33938
33939-static struct videobuf_queue_ops video_vbq_ops;
33940 /* Variables configurable through module params*/
33941 static u32 video1_numbuffers = 3;
33942 static u32 video2_numbuffers = 3;
33943@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33944 {
33945 struct videobuf_queue *q;
33946 struct omap_vout_device *vout = NULL;
33947+ static struct videobuf_queue_ops video_vbq_ops = {
33948+ .buf_setup = omap_vout_buffer_setup,
33949+ .buf_prepare = omap_vout_buffer_prepare,
33950+ .buf_release = omap_vout_buffer_release,
33951+ .buf_queue = omap_vout_buffer_queue,
33952+ };
33953
33954 vout = video_drvdata(file);
33955 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33956@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33957 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33958
33959 q = &vout->vbq;
33960- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33961- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33962- video_vbq_ops.buf_release = omap_vout_buffer_release;
33963- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33964 spin_lock_init(&vout->vbq_lock);
33965
33966 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33967diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33968index 305e6aa..0143317 100644
33969--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33970+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33971@@ -196,7 +196,7 @@ struct pvr2_hdw {
33972
33973 /* I2C stuff */
33974 struct i2c_adapter i2c_adap;
33975- struct i2c_algorithm i2c_algo;
33976+ i2c_algorithm_no_const i2c_algo;
33977 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33978 int i2c_cx25840_hack_state;
33979 int i2c_linked;
33980diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33981index 02194c0..091733b 100644
33982--- a/drivers/media/video/timblogiw.c
33983+++ b/drivers/media/video/timblogiw.c
33984@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33985
33986 /* Platform device functions */
33987
33988-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33989+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33990 .vidioc_querycap = timblogiw_querycap,
33991 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33992 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33993@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33994 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33995 };
33996
33997-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33998+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33999 .owner = THIS_MODULE,
34000 .open = timblogiw_open,
34001 .release = timblogiw_close,
34002diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34003index a5c591f..db692a3 100644
34004--- a/drivers/message/fusion/mptbase.c
34005+++ b/drivers/message/fusion/mptbase.c
34006@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34007 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34008 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34009
34010+#ifdef CONFIG_GRKERNSEC_HIDESYM
34011+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34012+#else
34013 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34014 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34015+#endif
34016+
34017 /*
34018 * Rounding UP to nearest 4-kB boundary here...
34019 */
34020diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34021index 551262e..7551198 100644
34022--- a/drivers/message/fusion/mptsas.c
34023+++ b/drivers/message/fusion/mptsas.c
34024@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34025 return 0;
34026 }
34027
34028+static inline void
34029+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34030+{
34031+ if (phy_info->port_details) {
34032+ phy_info->port_details->rphy = rphy;
34033+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34034+ ioc->name, rphy));
34035+ }
34036+
34037+ if (rphy) {
34038+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34039+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34040+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34041+ ioc->name, rphy, rphy->dev.release));
34042+ }
34043+}
34044+
34045 /* no mutex */
34046 static void
34047 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34048@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34049 return NULL;
34050 }
34051
34052-static inline void
34053-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34054-{
34055- if (phy_info->port_details) {
34056- phy_info->port_details->rphy = rphy;
34057- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34058- ioc->name, rphy));
34059- }
34060-
34061- if (rphy) {
34062- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34063- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34064- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34065- ioc->name, rphy, rphy->dev.release));
34066- }
34067-}
34068-
34069 static inline struct sas_port *
34070 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34071 {
34072diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34073index 0c3ced7..1fe34ec 100644
34074--- a/drivers/message/fusion/mptscsih.c
34075+++ b/drivers/message/fusion/mptscsih.c
34076@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34077
34078 h = shost_priv(SChost);
34079
34080- if (h) {
34081- if (h->info_kbuf == NULL)
34082- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34083- return h->info_kbuf;
34084- h->info_kbuf[0] = '\0';
34085+ if (!h)
34086+ return NULL;
34087
34088- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34089- h->info_kbuf[size-1] = '\0';
34090- }
34091+ if (h->info_kbuf == NULL)
34092+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34093+ return h->info_kbuf;
34094+ h->info_kbuf[0] = '\0';
34095+
34096+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34097+ h->info_kbuf[size-1] = '\0';
34098
34099 return h->info_kbuf;
34100 }
34101diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34102index 6d115c7..58ff7fd 100644
34103--- a/drivers/message/i2o/i2o_proc.c
34104+++ b/drivers/message/i2o/i2o_proc.c
34105@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34106 "Array Controller Device"
34107 };
34108
34109-static char *chtostr(u8 * chars, int n)
34110-{
34111- char tmp[256];
34112- tmp[0] = 0;
34113- return strncat(tmp, (char *)chars, n);
34114-}
34115-
34116 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34117 char *group)
34118 {
34119@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34120
34121 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34122 seq_printf(seq, "%-#8x", ddm_table.module_id);
34123- seq_printf(seq, "%-29s",
34124- chtostr(ddm_table.module_name_version, 28));
34125+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34126 seq_printf(seq, "%9d ", ddm_table.data_size);
34127 seq_printf(seq, "%8d", ddm_table.code_size);
34128
34129@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34130
34131 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34132 seq_printf(seq, "%-#8x", dst->module_id);
34133- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34134- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34135+ seq_printf(seq, "%-.28s", dst->module_name_version);
34136+ seq_printf(seq, "%-.8s", dst->date);
34137 seq_printf(seq, "%8d ", dst->module_size);
34138 seq_printf(seq, "%8d ", dst->mpb_size);
34139 seq_printf(seq, "0x%04x", dst->module_flags);
34140@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34141 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34142 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34143 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34144- seq_printf(seq, "Vendor info : %s\n",
34145- chtostr((u8 *) (work32 + 2), 16));
34146- seq_printf(seq, "Product info : %s\n",
34147- chtostr((u8 *) (work32 + 6), 16));
34148- seq_printf(seq, "Description : %s\n",
34149- chtostr((u8 *) (work32 + 10), 16));
34150- seq_printf(seq, "Product rev. : %s\n",
34151- chtostr((u8 *) (work32 + 14), 8));
34152+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34153+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34154+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34155+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34156
34157 seq_printf(seq, "Serial number : ");
34158 print_serial_number(seq, (u8 *) (work32 + 16),
34159@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34160 }
34161
34162 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34163- seq_printf(seq, "Module name : %s\n",
34164- chtostr(result.module_name, 24));
34165- seq_printf(seq, "Module revision : %s\n",
34166- chtostr(result.module_rev, 8));
34167+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34168+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34169
34170 seq_printf(seq, "Serial number : ");
34171 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34172@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34173 return 0;
34174 }
34175
34176- seq_printf(seq, "Device name : %s\n",
34177- chtostr(result.device_name, 64));
34178- seq_printf(seq, "Service name : %s\n",
34179- chtostr(result.service_name, 64));
34180- seq_printf(seq, "Physical name : %s\n",
34181- chtostr(result.physical_location, 64));
34182- seq_printf(seq, "Instance number : %s\n",
34183- chtostr(result.instance_number, 4));
34184+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34185+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34186+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34187+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34188
34189 return 0;
34190 }
34191diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34192index a8c08f3..155fe3d 100644
34193--- a/drivers/message/i2o/iop.c
34194+++ b/drivers/message/i2o/iop.c
34195@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34196
34197 spin_lock_irqsave(&c->context_list_lock, flags);
34198
34199- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34200- atomic_inc(&c->context_list_counter);
34201+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34202+ atomic_inc_unchecked(&c->context_list_counter);
34203
34204- entry->context = atomic_read(&c->context_list_counter);
34205+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34206
34207 list_add(&entry->list, &c->context_list);
34208
34209@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34210
34211 #if BITS_PER_LONG == 64
34212 spin_lock_init(&c->context_list_lock);
34213- atomic_set(&c->context_list_counter, 0);
34214+ atomic_set_unchecked(&c->context_list_counter, 0);
34215 INIT_LIST_HEAD(&c->context_list);
34216 #endif
34217
34218diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34219index 7ce65f4..e66e9bc 100644
34220--- a/drivers/mfd/abx500-core.c
34221+++ b/drivers/mfd/abx500-core.c
34222@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34223
34224 struct abx500_device_entry {
34225 struct list_head list;
34226- struct abx500_ops ops;
34227+ abx500_ops_no_const ops;
34228 struct device *dev;
34229 };
34230
34231diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34232index a9223ed..4127b13 100644
34233--- a/drivers/mfd/janz-cmodio.c
34234+++ b/drivers/mfd/janz-cmodio.c
34235@@ -13,6 +13,7 @@
34236
34237 #include <linux/kernel.h>
34238 #include <linux/module.h>
34239+#include <linux/slab.h>
34240 #include <linux/init.h>
34241 #include <linux/pci.h>
34242 #include <linux/interrupt.h>
34243diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34244index a981e2a..5ca0c8b 100644
34245--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34246+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34247@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34248 * the lid is closed. This leads to interrupts as soon as a little move
34249 * is done.
34250 */
34251- atomic_inc(&lis3->count);
34252+ atomic_inc_unchecked(&lis3->count);
34253
34254 wake_up_interruptible(&lis3->misc_wait);
34255 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34256@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34257 if (lis3->pm_dev)
34258 pm_runtime_get_sync(lis3->pm_dev);
34259
34260- atomic_set(&lis3->count, 0);
34261+ atomic_set_unchecked(&lis3->count, 0);
34262 return 0;
34263 }
34264
34265@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34266 add_wait_queue(&lis3->misc_wait, &wait);
34267 while (true) {
34268 set_current_state(TASK_INTERRUPTIBLE);
34269- data = atomic_xchg(&lis3->count, 0);
34270+ data = atomic_xchg_unchecked(&lis3->count, 0);
34271 if (data)
34272 break;
34273
34274@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34275 struct lis3lv02d, miscdev);
34276
34277 poll_wait(file, &lis3->misc_wait, wait);
34278- if (atomic_read(&lis3->count))
34279+ if (atomic_read_unchecked(&lis3->count))
34280 return POLLIN | POLLRDNORM;
34281 return 0;
34282 }
34283diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34284index 2b1482a..5d33616 100644
34285--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34286+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34287@@ -266,7 +266,7 @@ struct lis3lv02d {
34288 struct input_polled_dev *idev; /* input device */
34289 struct platform_device *pdev; /* platform device */
34290 struct regulator_bulk_data regulators[2];
34291- atomic_t count; /* interrupt count after last read */
34292+ atomic_unchecked_t count; /* interrupt count after last read */
34293 union axis_conversion ac; /* hw -> logical axis */
34294 int mapped_btns[3];
34295
34296diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34297index 2f30bad..c4c13d0 100644
34298--- a/drivers/misc/sgi-gru/gruhandles.c
34299+++ b/drivers/misc/sgi-gru/gruhandles.c
34300@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34301 unsigned long nsec;
34302
34303 nsec = CLKS2NSEC(clks);
34304- atomic_long_inc(&mcs_op_statistics[op].count);
34305- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34306+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34307+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34308 if (mcs_op_statistics[op].max < nsec)
34309 mcs_op_statistics[op].max = nsec;
34310 }
34311diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34312index 950dbe9..eeef0f8 100644
34313--- a/drivers/misc/sgi-gru/gruprocfs.c
34314+++ b/drivers/misc/sgi-gru/gruprocfs.c
34315@@ -32,9 +32,9 @@
34316
34317 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34318
34319-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34320+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34321 {
34322- unsigned long val = atomic_long_read(v);
34323+ unsigned long val = atomic_long_read_unchecked(v);
34324
34325 seq_printf(s, "%16lu %s\n", val, id);
34326 }
34327@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34328
34329 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34330 for (op = 0; op < mcsop_last; op++) {
34331- count = atomic_long_read(&mcs_op_statistics[op].count);
34332- total = atomic_long_read(&mcs_op_statistics[op].total);
34333+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34334+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34335 max = mcs_op_statistics[op].max;
34336 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34337 count ? total / count : 0, max);
34338diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34339index 5c3ce24..4915ccb 100644
34340--- a/drivers/misc/sgi-gru/grutables.h
34341+++ b/drivers/misc/sgi-gru/grutables.h
34342@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34343 * GRU statistics.
34344 */
34345 struct gru_stats_s {
34346- atomic_long_t vdata_alloc;
34347- atomic_long_t vdata_free;
34348- atomic_long_t gts_alloc;
34349- atomic_long_t gts_free;
34350- atomic_long_t gms_alloc;
34351- atomic_long_t gms_free;
34352- atomic_long_t gts_double_allocate;
34353- atomic_long_t assign_context;
34354- atomic_long_t assign_context_failed;
34355- atomic_long_t free_context;
34356- atomic_long_t load_user_context;
34357- atomic_long_t load_kernel_context;
34358- atomic_long_t lock_kernel_context;
34359- atomic_long_t unlock_kernel_context;
34360- atomic_long_t steal_user_context;
34361- atomic_long_t steal_kernel_context;
34362- atomic_long_t steal_context_failed;
34363- atomic_long_t nopfn;
34364- atomic_long_t asid_new;
34365- atomic_long_t asid_next;
34366- atomic_long_t asid_wrap;
34367- atomic_long_t asid_reuse;
34368- atomic_long_t intr;
34369- atomic_long_t intr_cbr;
34370- atomic_long_t intr_tfh;
34371- atomic_long_t intr_spurious;
34372- atomic_long_t intr_mm_lock_failed;
34373- atomic_long_t call_os;
34374- atomic_long_t call_os_wait_queue;
34375- atomic_long_t user_flush_tlb;
34376- atomic_long_t user_unload_context;
34377- atomic_long_t user_exception;
34378- atomic_long_t set_context_option;
34379- atomic_long_t check_context_retarget_intr;
34380- atomic_long_t check_context_unload;
34381- atomic_long_t tlb_dropin;
34382- atomic_long_t tlb_preload_page;
34383- atomic_long_t tlb_dropin_fail_no_asid;
34384- atomic_long_t tlb_dropin_fail_upm;
34385- atomic_long_t tlb_dropin_fail_invalid;
34386- atomic_long_t tlb_dropin_fail_range_active;
34387- atomic_long_t tlb_dropin_fail_idle;
34388- atomic_long_t tlb_dropin_fail_fmm;
34389- atomic_long_t tlb_dropin_fail_no_exception;
34390- atomic_long_t tfh_stale_on_fault;
34391- atomic_long_t mmu_invalidate_range;
34392- atomic_long_t mmu_invalidate_page;
34393- atomic_long_t flush_tlb;
34394- atomic_long_t flush_tlb_gru;
34395- atomic_long_t flush_tlb_gru_tgh;
34396- atomic_long_t flush_tlb_gru_zero_asid;
34397+ atomic_long_unchecked_t vdata_alloc;
34398+ atomic_long_unchecked_t vdata_free;
34399+ atomic_long_unchecked_t gts_alloc;
34400+ atomic_long_unchecked_t gts_free;
34401+ atomic_long_unchecked_t gms_alloc;
34402+ atomic_long_unchecked_t gms_free;
34403+ atomic_long_unchecked_t gts_double_allocate;
34404+ atomic_long_unchecked_t assign_context;
34405+ atomic_long_unchecked_t assign_context_failed;
34406+ atomic_long_unchecked_t free_context;
34407+ atomic_long_unchecked_t load_user_context;
34408+ atomic_long_unchecked_t load_kernel_context;
34409+ atomic_long_unchecked_t lock_kernel_context;
34410+ atomic_long_unchecked_t unlock_kernel_context;
34411+ atomic_long_unchecked_t steal_user_context;
34412+ atomic_long_unchecked_t steal_kernel_context;
34413+ atomic_long_unchecked_t steal_context_failed;
34414+ atomic_long_unchecked_t nopfn;
34415+ atomic_long_unchecked_t asid_new;
34416+ atomic_long_unchecked_t asid_next;
34417+ atomic_long_unchecked_t asid_wrap;
34418+ atomic_long_unchecked_t asid_reuse;
34419+ atomic_long_unchecked_t intr;
34420+ atomic_long_unchecked_t intr_cbr;
34421+ atomic_long_unchecked_t intr_tfh;
34422+ atomic_long_unchecked_t intr_spurious;
34423+ atomic_long_unchecked_t intr_mm_lock_failed;
34424+ atomic_long_unchecked_t call_os;
34425+ atomic_long_unchecked_t call_os_wait_queue;
34426+ atomic_long_unchecked_t user_flush_tlb;
34427+ atomic_long_unchecked_t user_unload_context;
34428+ atomic_long_unchecked_t user_exception;
34429+ atomic_long_unchecked_t set_context_option;
34430+ atomic_long_unchecked_t check_context_retarget_intr;
34431+ atomic_long_unchecked_t check_context_unload;
34432+ atomic_long_unchecked_t tlb_dropin;
34433+ atomic_long_unchecked_t tlb_preload_page;
34434+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34435+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34436+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34437+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34438+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34439+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34440+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34441+ atomic_long_unchecked_t tfh_stale_on_fault;
34442+ atomic_long_unchecked_t mmu_invalidate_range;
34443+ atomic_long_unchecked_t mmu_invalidate_page;
34444+ atomic_long_unchecked_t flush_tlb;
34445+ atomic_long_unchecked_t flush_tlb_gru;
34446+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34447+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34448
34449- atomic_long_t copy_gpa;
34450- atomic_long_t read_gpa;
34451+ atomic_long_unchecked_t copy_gpa;
34452+ atomic_long_unchecked_t read_gpa;
34453
34454- atomic_long_t mesq_receive;
34455- atomic_long_t mesq_receive_none;
34456- atomic_long_t mesq_send;
34457- atomic_long_t mesq_send_failed;
34458- atomic_long_t mesq_noop;
34459- atomic_long_t mesq_send_unexpected_error;
34460- atomic_long_t mesq_send_lb_overflow;
34461- atomic_long_t mesq_send_qlimit_reached;
34462- atomic_long_t mesq_send_amo_nacked;
34463- atomic_long_t mesq_send_put_nacked;
34464- atomic_long_t mesq_page_overflow;
34465- atomic_long_t mesq_qf_locked;
34466- atomic_long_t mesq_qf_noop_not_full;
34467- atomic_long_t mesq_qf_switch_head_failed;
34468- atomic_long_t mesq_qf_unexpected_error;
34469- atomic_long_t mesq_noop_unexpected_error;
34470- atomic_long_t mesq_noop_lb_overflow;
34471- atomic_long_t mesq_noop_qlimit_reached;
34472- atomic_long_t mesq_noop_amo_nacked;
34473- atomic_long_t mesq_noop_put_nacked;
34474- atomic_long_t mesq_noop_page_overflow;
34475+ atomic_long_unchecked_t mesq_receive;
34476+ atomic_long_unchecked_t mesq_receive_none;
34477+ atomic_long_unchecked_t mesq_send;
34478+ atomic_long_unchecked_t mesq_send_failed;
34479+ atomic_long_unchecked_t mesq_noop;
34480+ atomic_long_unchecked_t mesq_send_unexpected_error;
34481+ atomic_long_unchecked_t mesq_send_lb_overflow;
34482+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34483+ atomic_long_unchecked_t mesq_send_amo_nacked;
34484+ atomic_long_unchecked_t mesq_send_put_nacked;
34485+ atomic_long_unchecked_t mesq_page_overflow;
34486+ atomic_long_unchecked_t mesq_qf_locked;
34487+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34488+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34489+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34490+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34491+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34492+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34493+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34494+ atomic_long_unchecked_t mesq_noop_put_nacked;
34495+ atomic_long_unchecked_t mesq_noop_page_overflow;
34496
34497 };
34498
34499@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34500 tghop_invalidate, mcsop_last};
34501
34502 struct mcs_op_statistic {
34503- atomic_long_t count;
34504- atomic_long_t total;
34505+ atomic_long_unchecked_t count;
34506+ atomic_long_unchecked_t total;
34507 unsigned long max;
34508 };
34509
34510@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34511
34512 #define STAT(id) do { \
34513 if (gru_options & OPT_STATS) \
34514- atomic_long_inc(&gru_stats.id); \
34515+ atomic_long_inc_unchecked(&gru_stats.id); \
34516 } while (0)
34517
34518 #ifdef CONFIG_SGI_GRU_DEBUG
34519diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34520index c862cd4..0d176fe 100644
34521--- a/drivers/misc/sgi-xp/xp.h
34522+++ b/drivers/misc/sgi-xp/xp.h
34523@@ -288,7 +288,7 @@ struct xpc_interface {
34524 xpc_notify_func, void *);
34525 void (*received) (short, int, void *);
34526 enum xp_retval (*partid_to_nasids) (short, void *);
34527-};
34528+} __no_const;
34529
34530 extern struct xpc_interface xpc_interface;
34531
34532diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34533index b94d5f7..7f494c5 100644
34534--- a/drivers/misc/sgi-xp/xpc.h
34535+++ b/drivers/misc/sgi-xp/xpc.h
34536@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34537 void (*received_payload) (struct xpc_channel *, void *);
34538 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34539 };
34540+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34541
34542 /* struct xpc_partition act_state values (for XPC HB) */
34543
34544@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34545 /* found in xpc_main.c */
34546 extern struct device *xpc_part;
34547 extern struct device *xpc_chan;
34548-extern struct xpc_arch_operations xpc_arch_ops;
34549+extern xpc_arch_operations_no_const xpc_arch_ops;
34550 extern int xpc_disengage_timelimit;
34551 extern int xpc_disengage_timedout;
34552 extern int xpc_activate_IRQ_rcvd;
34553diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34554index 8d082b4..aa749ae 100644
34555--- a/drivers/misc/sgi-xp/xpc_main.c
34556+++ b/drivers/misc/sgi-xp/xpc_main.c
34557@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34558 .notifier_call = xpc_system_die,
34559 };
34560
34561-struct xpc_arch_operations xpc_arch_ops;
34562+xpc_arch_operations_no_const xpc_arch_ops;
34563
34564 /*
34565 * Timer function to enforce the timelimit on the partition disengage.
34566diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34567index 69ef0be..f3ef91e 100644
34568--- a/drivers/mmc/host/sdhci-pci.c
34569+++ b/drivers/mmc/host/sdhci-pci.c
34570@@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34571 .probe = via_probe,
34572 };
34573
34574-static const struct pci_device_id pci_ids[] __devinitdata = {
34575+static const struct pci_device_id pci_ids[] __devinitconst = {
34576 {
34577 .vendor = PCI_VENDOR_ID_RICOH,
34578 .device = PCI_DEVICE_ID_RICOH_R5C822,
34579diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34580index a4eb8b5..8c0628f 100644
34581--- a/drivers/mtd/devices/doc2000.c
34582+++ b/drivers/mtd/devices/doc2000.c
34583@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34584
34585 /* The ECC will not be calculated correctly if less than 512 is written */
34586 /* DBB-
34587- if (len != 0x200 && eccbuf)
34588+ if (len != 0x200)
34589 printk(KERN_WARNING
34590 "ECC needs a full sector write (adr: %lx size %lx)\n",
34591 (long) to, (long) len);
34592diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34593index a9e57d6..c6d8731 100644
34594--- a/drivers/mtd/nand/denali.c
34595+++ b/drivers/mtd/nand/denali.c
34596@@ -26,6 +26,7 @@
34597 #include <linux/pci.h>
34598 #include <linux/mtd/mtd.h>
34599 #include <linux/module.h>
34600+#include <linux/slab.h>
34601
34602 #include "denali.h"
34603
34604diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34605index 51b9d6a..52af9a7 100644
34606--- a/drivers/mtd/nftlmount.c
34607+++ b/drivers/mtd/nftlmount.c
34608@@ -24,6 +24,7 @@
34609 #include <asm/errno.h>
34610 #include <linux/delay.h>
34611 #include <linux/slab.h>
34612+#include <linux/sched.h>
34613 #include <linux/mtd/mtd.h>
34614 #include <linux/mtd/nand.h>
34615 #include <linux/mtd/nftl.h>
34616diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34617index 6762dc4..9956862 100644
34618--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34619+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34620@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34621 */
34622
34623 #define ATL2_PARAM(X, desc) \
34624- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34625+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34626 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34627 MODULE_PARM_DESC(X, desc);
34628 #else
34629diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34630index 61a7670..7da6e34 100644
34631--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34632+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34633@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34634
34635 int (*wait_comp)(struct bnx2x *bp,
34636 struct bnx2x_rx_mode_ramrod_params *p);
34637-};
34638+} __no_const;
34639
34640 /********************** Set multicast group ***********************************/
34641
34642diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34643index 93865f8..5448741 100644
34644--- a/drivers/net/ethernet/broadcom/tg3.h
34645+++ b/drivers/net/ethernet/broadcom/tg3.h
34646@@ -140,6 +140,7 @@
34647 #define CHIPREV_ID_5750_A0 0x4000
34648 #define CHIPREV_ID_5750_A1 0x4001
34649 #define CHIPREV_ID_5750_A3 0x4003
34650+#define CHIPREV_ID_5750_C1 0x4201
34651 #define CHIPREV_ID_5750_C2 0x4202
34652 #define CHIPREV_ID_5752_A0_HW 0x5000
34653 #define CHIPREV_ID_5752_A0 0x6000
34654diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34655index c4e8643..0979484 100644
34656--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34657+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34658@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34659 */
34660 struct l2t_skb_cb {
34661 arp_failure_handler_func arp_failure_handler;
34662-};
34663+} __no_const;
34664
34665 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34666
34667diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34668index 18b106c..2b38d36 100644
34669--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34670+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34671@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34672 for (i=0; i<ETH_ALEN; i++) {
34673 tmp.addr[i] = dev->dev_addr[i];
34674 }
34675- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34676+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34677 break;
34678
34679 case DE4X5_SET_HWADDR: /* Set the hardware address */
34680@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34681 spin_lock_irqsave(&lp->lock, flags);
34682 memcpy(&statbuf, &lp->pktStats, ioc->len);
34683 spin_unlock_irqrestore(&lp->lock, flags);
34684- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34685+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34686 return -EFAULT;
34687 break;
34688 }
34689diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34690index ed7d1dc..d426748 100644
34691--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34692+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34693@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34694 {NULL}};
34695
34696
34697-static const char *block_name[] __devinitdata = {
34698+static const char *block_name[] __devinitconst = {
34699 "21140 non-MII",
34700 "21140 MII PHY",
34701 "21142 Serial PHY",
34702diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34703index 2ac6fff..2d127d0 100644
34704--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34705+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34706@@ -236,7 +236,7 @@ struct pci_id_info {
34707 int drv_flags; /* Driver use, intended as capability flags. */
34708 };
34709
34710-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34711+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34712 { /* Sometime a Level-One switch card. */
34713 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34714 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34715diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34716index d783f4f..97fa1b0 100644
34717--- a/drivers/net/ethernet/dlink/sundance.c
34718+++ b/drivers/net/ethernet/dlink/sundance.c
34719@@ -218,7 +218,7 @@ enum {
34720 struct pci_id_info {
34721 const char *name;
34722 };
34723-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34724+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34725 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34726 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34727 {"D-Link DFE-580TX 4 port Server Adapter"},
34728diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34729index 1bbf6b3..430dcd0 100644
34730--- a/drivers/net/ethernet/emulex/benet/be_main.c
34731+++ b/drivers/net/ethernet/emulex/benet/be_main.c
34732@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34733
34734 if (wrapped)
34735 newacc += 65536;
34736- ACCESS_ONCE(*acc) = newacc;
34737+ ACCESS_ONCE_RW(*acc) = newacc;
34738 }
34739
34740 void be_parse_stats(struct be_adapter *adapter)
34741diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34742index 16b0704..d2c07d7 100644
34743--- a/drivers/net/ethernet/faraday/ftgmac100.c
34744+++ b/drivers/net/ethernet/faraday/ftgmac100.c
34745@@ -31,6 +31,8 @@
34746 #include <linux/netdevice.h>
34747 #include <linux/phy.h>
34748 #include <linux/platform_device.h>
34749+#include <linux/interrupt.h>
34750+#include <linux/irqreturn.h>
34751 #include <net/ip.h>
34752
34753 #include "ftgmac100.h"
34754diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34755index 829b109..4ae5f6a 100644
34756--- a/drivers/net/ethernet/faraday/ftmac100.c
34757+++ b/drivers/net/ethernet/faraday/ftmac100.c
34758@@ -31,6 +31,8 @@
34759 #include <linux/module.h>
34760 #include <linux/netdevice.h>
34761 #include <linux/platform_device.h>
34762+#include <linux/interrupt.h>
34763+#include <linux/irqreturn.h>
34764
34765 #include "ftmac100.h"
34766
34767diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34768index 1637b98..c42f87b 100644
34769--- a/drivers/net/ethernet/fealnx.c
34770+++ b/drivers/net/ethernet/fealnx.c
34771@@ -150,7 +150,7 @@ struct chip_info {
34772 int flags;
34773 };
34774
34775-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34776+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34777 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34778 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34779 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34780diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34781index b83897f..b2d970f 100644
34782--- a/drivers/net/ethernet/intel/e1000e/e1000.h
34783+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34784@@ -181,7 +181,7 @@ struct e1000_info;
34785 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34786 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34787 E1000_TXDCTL_COUNT_DESC | \
34788- (5 << 16) | /* wthresh must be +1 more than desired */\
34789+ (1 << 16) | /* wthresh must be +1 more than desired */\
34790 (1 << 8) | /* hthresh */ \
34791 0x1f) /* pthresh */
34792
34793diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34794index f82ecf5..7d59ecb 100644
34795--- a/drivers/net/ethernet/intel/e1000e/hw.h
34796+++ b/drivers/net/ethernet/intel/e1000e/hw.h
34797@@ -784,6 +784,7 @@ struct e1000_mac_operations {
34798 void (*config_collision_dist)(struct e1000_hw *);
34799 s32 (*read_mac_addr)(struct e1000_hw *);
34800 };
34801+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34802
34803 /*
34804 * When to use various PHY register access functions:
34805@@ -824,6 +825,7 @@ struct e1000_phy_operations {
34806 void (*power_up)(struct e1000_hw *);
34807 void (*power_down)(struct e1000_hw *);
34808 };
34809+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34810
34811 /* Function pointers for the NVM. */
34812 struct e1000_nvm_operations {
34813@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34814 s32 (*validate)(struct e1000_hw *);
34815 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34816 };
34817+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34818
34819 struct e1000_mac_info {
34820- struct e1000_mac_operations ops;
34821+ e1000_mac_operations_no_const ops;
34822 u8 addr[ETH_ALEN];
34823 u8 perm_addr[ETH_ALEN];
34824
34825@@ -879,7 +882,7 @@ struct e1000_mac_info {
34826 };
34827
34828 struct e1000_phy_info {
34829- struct e1000_phy_operations ops;
34830+ e1000_phy_operations_no_const ops;
34831
34832 enum e1000_phy_type type;
34833
34834@@ -913,7 +916,7 @@ struct e1000_phy_info {
34835 };
34836
34837 struct e1000_nvm_info {
34838- struct e1000_nvm_operations ops;
34839+ e1000_nvm_operations_no_const ops;
34840
34841 enum e1000_nvm_type type;
34842 enum e1000_nvm_override override;
34843diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34844index f67cbd3..cef9e3d 100644
34845--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34846+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34847@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34848 s32 (*read_mac_addr)(struct e1000_hw *);
34849 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34850 };
34851+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34852
34853 struct e1000_phy_operations {
34854 s32 (*acquire)(struct e1000_hw *);
34855@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34856 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34857 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34858 };
34859+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34860
34861 struct e1000_nvm_operations {
34862 s32 (*acquire)(struct e1000_hw *);
34863@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34864 s32 (*update)(struct e1000_hw *);
34865 s32 (*validate)(struct e1000_hw *);
34866 };
34867+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34868
34869 struct e1000_info {
34870 s32 (*get_invariants)(struct e1000_hw *);
34871@@ -350,7 +353,7 @@ struct e1000_info {
34872 extern const struct e1000_info e1000_82575_info;
34873
34874 struct e1000_mac_info {
34875- struct e1000_mac_operations ops;
34876+ e1000_mac_operations_no_const ops;
34877
34878 u8 addr[6];
34879 u8 perm_addr[6];
34880@@ -388,7 +391,7 @@ struct e1000_mac_info {
34881 };
34882
34883 struct e1000_phy_info {
34884- struct e1000_phy_operations ops;
34885+ e1000_phy_operations_no_const ops;
34886
34887 enum e1000_phy_type type;
34888
34889@@ -423,7 +426,7 @@ struct e1000_phy_info {
34890 };
34891
34892 struct e1000_nvm_info {
34893- struct e1000_nvm_operations ops;
34894+ e1000_nvm_operations_no_const ops;
34895 enum e1000_nvm_type type;
34896 enum e1000_nvm_override override;
34897
34898@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34899 s32 (*check_for_ack)(struct e1000_hw *, u16);
34900 s32 (*check_for_rst)(struct e1000_hw *, u16);
34901 };
34902+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34903
34904 struct e1000_mbx_stats {
34905 u32 msgs_tx;
34906@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34907 };
34908
34909 struct e1000_mbx_info {
34910- struct e1000_mbx_operations ops;
34911+ e1000_mbx_operations_no_const ops;
34912 struct e1000_mbx_stats stats;
34913 u32 timeout;
34914 u32 usec_delay;
34915diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34916index 57db3c6..aa825fc 100644
34917--- a/drivers/net/ethernet/intel/igbvf/vf.h
34918+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34919@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34920 s32 (*read_mac_addr)(struct e1000_hw *);
34921 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34922 };
34923+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34924
34925 struct e1000_mac_info {
34926- struct e1000_mac_operations ops;
34927+ e1000_mac_operations_no_const ops;
34928 u8 addr[6];
34929 u8 perm_addr[6];
34930
34931@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34932 s32 (*check_for_ack)(struct e1000_hw *);
34933 s32 (*check_for_rst)(struct e1000_hw *);
34934 };
34935+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34936
34937 struct e1000_mbx_stats {
34938 u32 msgs_tx;
34939@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34940 };
34941
34942 struct e1000_mbx_info {
34943- struct e1000_mbx_operations ops;
34944+ e1000_mbx_operations_no_const ops;
34945 struct e1000_mbx_stats stats;
34946 u32 timeout;
34947 u32 usec_delay;
34948diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34949index 8636e83..ab9bbc3 100644
34950--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34951+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34952@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34953 s32 (*update_checksum)(struct ixgbe_hw *);
34954 u16 (*calc_checksum)(struct ixgbe_hw *);
34955 };
34956+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34957
34958 struct ixgbe_mac_operations {
34959 s32 (*init_hw)(struct ixgbe_hw *);
34960@@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34961 /* Manageability interface */
34962 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34963 };
34964+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34965
34966 struct ixgbe_phy_operations {
34967 s32 (*identify)(struct ixgbe_hw *);
34968@@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34969 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34970 s32 (*check_overtemp)(struct ixgbe_hw *);
34971 };
34972+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34973
34974 struct ixgbe_eeprom_info {
34975- struct ixgbe_eeprom_operations ops;
34976+ ixgbe_eeprom_operations_no_const ops;
34977 enum ixgbe_eeprom_type type;
34978 u32 semaphore_delay;
34979 u16 word_size;
34980@@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34981
34982 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34983 struct ixgbe_mac_info {
34984- struct ixgbe_mac_operations ops;
34985+ ixgbe_mac_operations_no_const ops;
34986 enum ixgbe_mac_type type;
34987 u8 addr[ETH_ALEN];
34988 u8 perm_addr[ETH_ALEN];
34989@@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34990 };
34991
34992 struct ixgbe_phy_info {
34993- struct ixgbe_phy_operations ops;
34994+ ixgbe_phy_operations_no_const ops;
34995 struct mdio_if_info mdio;
34996 enum ixgbe_phy_type type;
34997 u32 id;
34998@@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34999 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35000 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35001 };
35002+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35003
35004 struct ixgbe_mbx_stats {
35005 u32 msgs_tx;
35006@@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
35007 };
35008
35009 struct ixgbe_mbx_info {
35010- struct ixgbe_mbx_operations ops;
35011+ ixgbe_mbx_operations_no_const ops;
35012 struct ixgbe_mbx_stats stats;
35013 u32 timeout;
35014 u32 usec_delay;
35015diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35016index 307611a..d8e4562 100644
35017--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35018+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35019@@ -969,8 +969,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
35020 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
35021 for (i = 0; i < q_vector->txr_count; i++) {
35022 tx_ring = &(adapter->tx_ring[r_idx]);
35023- tx_ring->total_bytes = 0;
35024- tx_ring->total_packets = 0;
35025 ixgbevf_clean_tx_irq(adapter, tx_ring);
35026 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
35027 r_idx + 1);
35028@@ -994,16 +992,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
35029 struct ixgbe_hw *hw = &adapter->hw;
35030 struct ixgbevf_ring *rx_ring;
35031 int r_idx;
35032- int i;
35033-
35034- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
35035- for (i = 0; i < q_vector->rxr_count; i++) {
35036- rx_ring = &(adapter->rx_ring[r_idx]);
35037- rx_ring->total_bytes = 0;
35038- rx_ring->total_packets = 0;
35039- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
35040- r_idx + 1);
35041- }
35042
35043 if (!q_vector->rxr_count)
35044 return IRQ_HANDLED;
35045diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35046index 25c951d..cc7cf33 100644
35047--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35048+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35049@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35050 s32 (*clear_vfta)(struct ixgbe_hw *);
35051 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35052 };
35053+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35054
35055 enum ixgbe_mac_type {
35056 ixgbe_mac_unknown = 0,
35057@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35058 };
35059
35060 struct ixgbe_mac_info {
35061- struct ixgbe_mac_operations ops;
35062+ ixgbe_mac_operations_no_const ops;
35063 u8 addr[6];
35064 u8 perm_addr[6];
35065
35066@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35067 s32 (*check_for_ack)(struct ixgbe_hw *);
35068 s32 (*check_for_rst)(struct ixgbe_hw *);
35069 };
35070+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35071
35072 struct ixgbe_mbx_stats {
35073 u32 msgs_tx;
35074@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35075 };
35076
35077 struct ixgbe_mbx_info {
35078- struct ixgbe_mbx_operations ops;
35079+ ixgbe_mbx_operations_no_const ops;
35080 struct ixgbe_mbx_stats stats;
35081 u32 timeout;
35082 u32 udelay;
35083diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35084index 8bb05b4..074796f 100644
35085--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35086+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35087@@ -41,6 +41,7 @@
35088 #include <linux/slab.h>
35089 #include <linux/io-mapping.h>
35090 #include <linux/delay.h>
35091+#include <linux/sched.h>
35092
35093 #include <linux/mlx4/device.h>
35094 #include <linux/mlx4/doorbell.h>
35095diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35096index 5046a64..71ca936 100644
35097--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35098+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35099@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35100 void (*link_down)(struct __vxge_hw_device *devh);
35101 void (*crit_err)(struct __vxge_hw_device *devh,
35102 enum vxge_hw_event type, u64 ext_data);
35103-};
35104+} __no_const;
35105
35106 /*
35107 * struct __vxge_hw_blockpool_entry - Block private data structure
35108diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35109index 4a518a3..936b334 100644
35110--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35111+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35112@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35113 struct vxge_hw_mempool_dma *dma_object,
35114 u32 index,
35115 u32 is_last);
35116-};
35117+} __no_const;
35118
35119 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35120 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35121diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35122index 161e045..0bb5b86 100644
35123--- a/drivers/net/ethernet/realtek/r8169.c
35124+++ b/drivers/net/ethernet/realtek/r8169.c
35125@@ -708,17 +708,17 @@ struct rtl8169_private {
35126 struct mdio_ops {
35127 void (*write)(void __iomem *, int, int);
35128 int (*read)(void __iomem *, int);
35129- } mdio_ops;
35130+ } __no_const mdio_ops;
35131
35132 struct pll_power_ops {
35133 void (*down)(struct rtl8169_private *);
35134 void (*up)(struct rtl8169_private *);
35135- } pll_power_ops;
35136+ } __no_const pll_power_ops;
35137
35138 struct jumbo_ops {
35139 void (*enable)(struct rtl8169_private *);
35140 void (*disable)(struct rtl8169_private *);
35141- } jumbo_ops;
35142+ } __no_const jumbo_ops;
35143
35144 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35145 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35146diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35147index a9deda8..5507c31 100644
35148--- a/drivers/net/ethernet/sis/sis190.c
35149+++ b/drivers/net/ethernet/sis/sis190.c
35150@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35151 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35152 struct net_device *dev)
35153 {
35154- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35155+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35156 struct sis190_private *tp = netdev_priv(dev);
35157 struct pci_dev *isa_bridge;
35158 u8 reg, tmp8;
35159diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35160index c07cfe9..81cbf7e 100644
35161--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35162+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35163@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35164
35165 writel(value, ioaddr + MMC_CNTRL);
35166
35167- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35168- MMC_CNTRL, value);
35169+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35170+// MMC_CNTRL, value);
35171 }
35172
35173 /* To mask all all interrupts.*/
35174diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35175index 9bdfaba..3d8f8d4 100644
35176--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35177+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35178@@ -1587,7 +1587,7 @@ static const struct file_operations stmmac_rings_status_fops = {
35179 .open = stmmac_sysfs_ring_open,
35180 .read = seq_read,
35181 .llseek = seq_lseek,
35182- .release = seq_release,
35183+ .release = single_release,
35184 };
35185
35186 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
35187@@ -1659,7 +1659,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
35188 .open = stmmac_sysfs_dma_cap_open,
35189 .read = seq_read,
35190 .llseek = seq_lseek,
35191- .release = seq_release,
35192+ .release = single_release,
35193 };
35194
35195 static int stmmac_init_fs(struct net_device *dev)
35196diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35197index c358245..8c1de63 100644
35198--- a/drivers/net/hyperv/hyperv_net.h
35199+++ b/drivers/net/hyperv/hyperv_net.h
35200@@ -98,7 +98,7 @@ struct rndis_device {
35201
35202 enum rndis_device_state state;
35203 bool link_state;
35204- atomic_t new_req_id;
35205+ atomic_unchecked_t new_req_id;
35206
35207 spinlock_t request_lock;
35208 struct list_head req_list;
35209diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35210index d6be64b..5d97e3b 100644
35211--- a/drivers/net/hyperv/rndis_filter.c
35212+++ b/drivers/net/hyperv/rndis_filter.c
35213@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35214 * template
35215 */
35216 set = &rndis_msg->msg.set_req;
35217- set->req_id = atomic_inc_return(&dev->new_req_id);
35218+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35219
35220 /* Add to the request list */
35221 spin_lock_irqsave(&dev->request_lock, flags);
35222@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35223
35224 /* Setup the rndis set */
35225 halt = &request->request_msg.msg.halt_req;
35226- halt->req_id = atomic_inc_return(&dev->new_req_id);
35227+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35228
35229 /* Ignore return since this msg is optional. */
35230 rndis_filter_send_request(dev, request);
35231diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35232index 21d7151..8034208 100644
35233--- a/drivers/net/ppp/ppp_generic.c
35234+++ b/drivers/net/ppp/ppp_generic.c
35235@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35236 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35237 struct ppp_stats stats;
35238 struct ppp_comp_stats cstats;
35239- char *vers;
35240
35241 switch (cmd) {
35242 case SIOCGPPPSTATS:
35243@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35244 break;
35245
35246 case SIOCGPPPVER:
35247- vers = PPP_VERSION;
35248- if (copy_to_user(addr, vers, strlen(vers) + 1))
35249+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35250 break;
35251 err = 0;
35252 break;
35253diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35254index b715e6b..6d2490f 100644
35255--- a/drivers/net/tokenring/abyss.c
35256+++ b/drivers/net/tokenring/abyss.c
35257@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
35258
35259 static int __init abyss_init (void)
35260 {
35261- abyss_netdev_ops = tms380tr_netdev_ops;
35262+ pax_open_kernel();
35263+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35264
35265- abyss_netdev_ops.ndo_open = abyss_open;
35266- abyss_netdev_ops.ndo_stop = abyss_close;
35267+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35268+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35269+ pax_close_kernel();
35270
35271 return pci_register_driver(&abyss_driver);
35272 }
35273diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35274index 28adcdf..ae82f35 100644
35275--- a/drivers/net/tokenring/madgemc.c
35276+++ b/drivers/net/tokenring/madgemc.c
35277@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
35278
35279 static int __init madgemc_init (void)
35280 {
35281- madgemc_netdev_ops = tms380tr_netdev_ops;
35282- madgemc_netdev_ops.ndo_open = madgemc_open;
35283- madgemc_netdev_ops.ndo_stop = madgemc_close;
35284+ pax_open_kernel();
35285+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35286+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35287+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35288+ pax_close_kernel();
35289
35290 return mca_register_driver (&madgemc_driver);
35291 }
35292diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35293index 62d90e4..9d84237 100644
35294--- a/drivers/net/tokenring/proteon.c
35295+++ b/drivers/net/tokenring/proteon.c
35296@@ -352,9 +352,11 @@ static int __init proteon_init(void)
35297 struct platform_device *pdev;
35298 int i, num = 0, err = 0;
35299
35300- proteon_netdev_ops = tms380tr_netdev_ops;
35301- proteon_netdev_ops.ndo_open = proteon_open;
35302- proteon_netdev_ops.ndo_stop = tms380tr_close;
35303+ pax_open_kernel();
35304+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35305+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35306+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35307+ pax_close_kernel();
35308
35309 err = platform_driver_register(&proteon_driver);
35310 if (err)
35311diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35312index ee11e93..c8f19c7 100644
35313--- a/drivers/net/tokenring/skisa.c
35314+++ b/drivers/net/tokenring/skisa.c
35315@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35316 struct platform_device *pdev;
35317 int i, num = 0, err = 0;
35318
35319- sk_isa_netdev_ops = tms380tr_netdev_ops;
35320- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35321- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35322+ pax_open_kernel();
35323+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35324+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35325+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35326+ pax_close_kernel();
35327
35328 err = platform_driver_register(&sk_isa_driver);
35329 if (err)
35330diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35331index 2d2a688..35f2372 100644
35332--- a/drivers/net/usb/hso.c
35333+++ b/drivers/net/usb/hso.c
35334@@ -71,7 +71,7 @@
35335 #include <asm/byteorder.h>
35336 #include <linux/serial_core.h>
35337 #include <linux/serial.h>
35338-
35339+#include <asm/local.h>
35340
35341 #define MOD_AUTHOR "Option Wireless"
35342 #define MOD_DESCRIPTION "USB High Speed Option driver"
35343@@ -257,7 +257,7 @@ struct hso_serial {
35344
35345 /* from usb_serial_port */
35346 struct tty_struct *tty;
35347- int open_count;
35348+ local_t open_count;
35349 spinlock_t serial_lock;
35350
35351 int (*write_data) (struct hso_serial *serial);
35352@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35353 struct urb *urb;
35354
35355 urb = serial->rx_urb[0];
35356- if (serial->open_count > 0) {
35357+ if (local_read(&serial->open_count) > 0) {
35358 count = put_rxbuf_data(urb, serial);
35359 if (count == -1)
35360 return;
35361@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35362 DUMP1(urb->transfer_buffer, urb->actual_length);
35363
35364 /* Anyone listening? */
35365- if (serial->open_count == 0)
35366+ if (local_read(&serial->open_count) == 0)
35367 return;
35368
35369 if (status == 0) {
35370@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35371 spin_unlock_irq(&serial->serial_lock);
35372
35373 /* check for port already opened, if not set the termios */
35374- serial->open_count++;
35375- if (serial->open_count == 1) {
35376+ if (local_inc_return(&serial->open_count) == 1) {
35377 serial->rx_state = RX_IDLE;
35378 /* Force default termio settings */
35379 _hso_serial_set_termios(tty, NULL);
35380@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35381 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35382 if (result) {
35383 hso_stop_serial_device(serial->parent);
35384- serial->open_count--;
35385+ local_dec(&serial->open_count);
35386 kref_put(&serial->parent->ref, hso_serial_ref_free);
35387 }
35388 } else {
35389@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35390
35391 /* reset the rts and dtr */
35392 /* do the actual close */
35393- serial->open_count--;
35394+ local_dec(&serial->open_count);
35395
35396- if (serial->open_count <= 0) {
35397- serial->open_count = 0;
35398+ if (local_read(&serial->open_count) <= 0) {
35399+ local_set(&serial->open_count, 0);
35400 spin_lock_irq(&serial->serial_lock);
35401 if (serial->tty == tty) {
35402 serial->tty->driver_data = NULL;
35403@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35404
35405 /* the actual setup */
35406 spin_lock_irqsave(&serial->serial_lock, flags);
35407- if (serial->open_count)
35408+ if (local_read(&serial->open_count))
35409 _hso_serial_set_termios(tty, old);
35410 else
35411 tty->termios = old;
35412@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35413 D1("Pending read interrupt on port %d\n", i);
35414 spin_lock(&serial->serial_lock);
35415 if (serial->rx_state == RX_IDLE &&
35416- serial->open_count > 0) {
35417+ local_read(&serial->open_count) > 0) {
35418 /* Setup and send a ctrl req read on
35419 * port i */
35420 if (!serial->rx_urb_filled[0]) {
35421@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35422 /* Start all serial ports */
35423 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35424 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35425- if (dev2ser(serial_table[i])->open_count) {
35426+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35427 result =
35428 hso_start_serial_device(serial_table[i], GFP_NOIO);
35429 hso_kick_transmit(dev2ser(serial_table[i]));
35430diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35431index 420d69b..74f90a2 100644
35432--- a/drivers/net/wireless/ath/ath.h
35433+++ b/drivers/net/wireless/ath/ath.h
35434@@ -119,6 +119,7 @@ struct ath_ops {
35435 void (*write_flush) (void *);
35436 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35437 };
35438+typedef struct ath_ops __no_const ath_ops_no_const;
35439
35440 struct ath_common;
35441 struct ath_bus_ops;
35442diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35443index aa2abaf..5f5152d 100644
35444--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35445+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35446@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35447 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35448 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35449
35450- ACCESS_ONCE(ads->ds_link) = i->link;
35451- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35452+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35453+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35454
35455 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35456 ctl6 = SM(i->keytype, AR_EncrType);
35457@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35458
35459 if ((i->is_first || i->is_last) &&
35460 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35461- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35462+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35463 | set11nTries(i->rates, 1)
35464 | set11nTries(i->rates, 2)
35465 | set11nTries(i->rates, 3)
35466 | (i->dur_update ? AR_DurUpdateEna : 0)
35467 | SM(0, AR_BurstDur);
35468
35469- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35470+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35471 | set11nRate(i->rates, 1)
35472 | set11nRate(i->rates, 2)
35473 | set11nRate(i->rates, 3);
35474 } else {
35475- ACCESS_ONCE(ads->ds_ctl2) = 0;
35476- ACCESS_ONCE(ads->ds_ctl3) = 0;
35477+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35478+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35479 }
35480
35481 if (!i->is_first) {
35482- ACCESS_ONCE(ads->ds_ctl0) = 0;
35483- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35484- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35485+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35486+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35487+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35488 return;
35489 }
35490
35491@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35492 break;
35493 }
35494
35495- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35496+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35497 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35498 | SM(i->txpower, AR_XmitPower)
35499 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35500@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35501 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35502 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35503
35504- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35505- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35506+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35507+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35508
35509 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35510 return;
35511
35512- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35513+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35514 | set11nPktDurRTSCTS(i->rates, 1);
35515
35516- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35517+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35518 | set11nPktDurRTSCTS(i->rates, 3);
35519
35520- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35521+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35522 | set11nRateFlags(i->rates, 1)
35523 | set11nRateFlags(i->rates, 2)
35524 | set11nRateFlags(i->rates, 3)
35525diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35526index a66a13b..0ef399e 100644
35527--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35528+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35529@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35530 (i->qcu << AR_TxQcuNum_S) | desc_len;
35531
35532 checksum += val;
35533- ACCESS_ONCE(ads->info) = val;
35534+ ACCESS_ONCE_RW(ads->info) = val;
35535
35536 checksum += i->link;
35537- ACCESS_ONCE(ads->link) = i->link;
35538+ ACCESS_ONCE_RW(ads->link) = i->link;
35539
35540 checksum += i->buf_addr[0];
35541- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35542+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35543 checksum += i->buf_addr[1];
35544- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35545+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35546 checksum += i->buf_addr[2];
35547- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35548+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35549 checksum += i->buf_addr[3];
35550- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35551+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35552
35553 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35554- ACCESS_ONCE(ads->ctl3) = val;
35555+ ACCESS_ONCE_RW(ads->ctl3) = val;
35556 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35557- ACCESS_ONCE(ads->ctl5) = val;
35558+ ACCESS_ONCE_RW(ads->ctl5) = val;
35559 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35560- ACCESS_ONCE(ads->ctl7) = val;
35561+ ACCESS_ONCE_RW(ads->ctl7) = val;
35562 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35563- ACCESS_ONCE(ads->ctl9) = val;
35564+ ACCESS_ONCE_RW(ads->ctl9) = val;
35565
35566 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35567- ACCESS_ONCE(ads->ctl10) = checksum;
35568+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35569
35570 if (i->is_first || i->is_last) {
35571- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35572+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35573 | set11nTries(i->rates, 1)
35574 | set11nTries(i->rates, 2)
35575 | set11nTries(i->rates, 3)
35576 | (i->dur_update ? AR_DurUpdateEna : 0)
35577 | SM(0, AR_BurstDur);
35578
35579- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35580+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35581 | set11nRate(i->rates, 1)
35582 | set11nRate(i->rates, 2)
35583 | set11nRate(i->rates, 3);
35584 } else {
35585- ACCESS_ONCE(ads->ctl13) = 0;
35586- ACCESS_ONCE(ads->ctl14) = 0;
35587+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35588+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35589 }
35590
35591 ads->ctl20 = 0;
35592@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35593
35594 ctl17 = SM(i->keytype, AR_EncrType);
35595 if (!i->is_first) {
35596- ACCESS_ONCE(ads->ctl11) = 0;
35597- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35598- ACCESS_ONCE(ads->ctl15) = 0;
35599- ACCESS_ONCE(ads->ctl16) = 0;
35600- ACCESS_ONCE(ads->ctl17) = ctl17;
35601- ACCESS_ONCE(ads->ctl18) = 0;
35602- ACCESS_ONCE(ads->ctl19) = 0;
35603+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35604+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35605+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35606+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35607+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35608+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35609+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35610 return;
35611 }
35612
35613- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35614+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35615 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35616 | SM(i->txpower, AR_XmitPower)
35617 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35618@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35619 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35620 ctl12 |= SM(val, AR_PAPRDChainMask);
35621
35622- ACCESS_ONCE(ads->ctl12) = ctl12;
35623- ACCESS_ONCE(ads->ctl17) = ctl17;
35624+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35625+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35626
35627- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35628+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35629 | set11nPktDurRTSCTS(i->rates, 1);
35630
35631- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35632+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35633 | set11nPktDurRTSCTS(i->rates, 3);
35634
35635- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35636+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35637 | set11nRateFlags(i->rates, 1)
35638 | set11nRateFlags(i->rates, 2)
35639 | set11nRateFlags(i->rates, 3)
35640 | SM(i->rtscts_rate, AR_RTSCTSRate);
35641
35642- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35643+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35644 }
35645
35646 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35647diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35648index e88f182..4e57f5d 100644
35649--- a/drivers/net/wireless/ath/ath9k/hw.h
35650+++ b/drivers/net/wireless/ath/ath9k/hw.h
35651@@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35652
35653 /* ANI */
35654 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35655-};
35656+} __no_const;
35657
35658 /**
35659 * struct ath_hw_ops - callbacks used by hardware code and driver code
35660@@ -644,7 +644,7 @@ struct ath_hw_ops {
35661 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35662 struct ath_hw_antcomb_conf *antconf);
35663
35664-};
35665+} __no_const;
35666
35667 struct ath_nf_limits {
35668 s16 max;
35669@@ -664,7 +664,7 @@ enum ath_cal_list {
35670 #define AH_FASTCC 0x4
35671
35672 struct ath_hw {
35673- struct ath_ops reg_ops;
35674+ ath_ops_no_const reg_ops;
35675
35676 struct ieee80211_hw *hw;
35677 struct ath_common common;
35678diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35679index af00e2c..ab04d34 100644
35680--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35681+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35682@@ -545,7 +545,7 @@ struct phy_func_ptr {
35683 void (*carrsuppr)(struct brcms_phy *);
35684 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35685 void (*detach)(struct brcms_phy *);
35686-};
35687+} __no_const;
35688
35689 struct brcms_phy {
35690 struct brcms_phy_pub pubpi_ro;
35691diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35692index faec404..a5277f1 100644
35693--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35694+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35695@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35696 */
35697 if (il3945_mod_params.disable_hw_scan) {
35698 D_INFO("Disabling hw_scan\n");
35699- il3945_mac_ops.hw_scan = NULL;
35700+ pax_open_kernel();
35701+ *(void **)&il3945_mac_ops.hw_scan = NULL;
35702+ pax_close_kernel();
35703 }
35704
35705 D_INFO("*** LOAD DRIVER ***\n");
35706diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35707index b7ce6a6..5649756 100644
35708--- a/drivers/net/wireless/mac80211_hwsim.c
35709+++ b/drivers/net/wireless/mac80211_hwsim.c
35710@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35711 return -EINVAL;
35712
35713 if (fake_hw_scan) {
35714- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35715- mac80211_hwsim_ops.sw_scan_start = NULL;
35716- mac80211_hwsim_ops.sw_scan_complete = NULL;
35717+ pax_open_kernel();
35718+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35719+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35720+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35721+ pax_close_kernel();
35722 }
35723
35724 spin_lock_init(&hwsim_radio_lock);
35725diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35726index 35225e9..95e6bf9 100644
35727--- a/drivers/net/wireless/mwifiex/main.h
35728+++ b/drivers/net/wireless/mwifiex/main.h
35729@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35730 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35731 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35732 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35733-};
35734+} __no_const;
35735
35736 struct mwifiex_adapter {
35737 u8 iface_type;
35738diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35739index d66e298..55b0a89 100644
35740--- a/drivers/net/wireless/rndis_wlan.c
35741+++ b/drivers/net/wireless/rndis_wlan.c
35742@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35743
35744 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35745
35746- if (rts_threshold < 0 || rts_threshold > 2347)
35747+ if (rts_threshold > 2347)
35748 rts_threshold = 2347;
35749
35750 tmp = cpu_to_le32(rts_threshold);
35751diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35752index c264dfa..08ee30e 100644
35753--- a/drivers/net/wireless/rt2x00/rt2x00.h
35754+++ b/drivers/net/wireless/rt2x00/rt2x00.h
35755@@ -396,7 +396,7 @@ struct rt2x00_intf {
35756 * for hardware which doesn't support hardware
35757 * sequence counting.
35758 */
35759- atomic_t seqno;
35760+ atomic_unchecked_t seqno;
35761 };
35762
35763 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35764diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35765index 50f92d5..f3afc41 100644
35766--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35767+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35768@@ -229,9 +229,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35769 * sequence counter given by mac80211.
35770 */
35771 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35772- seqno = atomic_add_return(0x10, &intf->seqno);
35773+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35774 else
35775- seqno = atomic_read(&intf->seqno);
35776+ seqno = atomic_read_unchecked(&intf->seqno);
35777
35778 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35779 hdr->seq_ctrl |= cpu_to_le16(seqno);
35780diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35781index 9d8f581..0f6589e 100644
35782--- a/drivers/net/wireless/wl1251/wl1251.h
35783+++ b/drivers/net/wireless/wl1251/wl1251.h
35784@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35785 void (*reset)(struct wl1251 *wl);
35786 void (*enable_irq)(struct wl1251 *wl);
35787 void (*disable_irq)(struct wl1251 *wl);
35788-};
35789+} __no_const;
35790
35791 struct wl1251 {
35792 struct ieee80211_hw *hw;
35793diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35794index f34b5b2..b5abb9f 100644
35795--- a/drivers/oprofile/buffer_sync.c
35796+++ b/drivers/oprofile/buffer_sync.c
35797@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35798 if (cookie == NO_COOKIE)
35799 offset = pc;
35800 if (cookie == INVALID_COOKIE) {
35801- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35802+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35803 offset = pc;
35804 }
35805 if (cookie != last_cookie) {
35806@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35807 /* add userspace sample */
35808
35809 if (!mm) {
35810- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35811+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35812 return 0;
35813 }
35814
35815 cookie = lookup_dcookie(mm, s->eip, &offset);
35816
35817 if (cookie == INVALID_COOKIE) {
35818- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35819+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35820 return 0;
35821 }
35822
35823@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35824 /* ignore backtraces if failed to add a sample */
35825 if (state == sb_bt_start) {
35826 state = sb_bt_ignore;
35827- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35828+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35829 }
35830 }
35831 release_mm(mm);
35832diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35833index c0cc4e7..44d4e54 100644
35834--- a/drivers/oprofile/event_buffer.c
35835+++ b/drivers/oprofile/event_buffer.c
35836@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35837 }
35838
35839 if (buffer_pos == buffer_size) {
35840- atomic_inc(&oprofile_stats.event_lost_overflow);
35841+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35842 return;
35843 }
35844
35845diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35846index ed2c3ec..deda85a 100644
35847--- a/drivers/oprofile/oprof.c
35848+++ b/drivers/oprofile/oprof.c
35849@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35850 if (oprofile_ops.switch_events())
35851 return;
35852
35853- atomic_inc(&oprofile_stats.multiplex_counter);
35854+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35855 start_switch_worker();
35856 }
35857
35858diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35859index 917d28e..d62d981 100644
35860--- a/drivers/oprofile/oprofile_stats.c
35861+++ b/drivers/oprofile/oprofile_stats.c
35862@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35863 cpu_buf->sample_invalid_eip = 0;
35864 }
35865
35866- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35867- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35868- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35869- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35870- atomic_set(&oprofile_stats.multiplex_counter, 0);
35871+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35872+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35873+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35874+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35875+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35876 }
35877
35878
35879diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35880index 38b6fc0..b5cbfce 100644
35881--- a/drivers/oprofile/oprofile_stats.h
35882+++ b/drivers/oprofile/oprofile_stats.h
35883@@ -13,11 +13,11 @@
35884 #include <linux/atomic.h>
35885
35886 struct oprofile_stat_struct {
35887- atomic_t sample_lost_no_mm;
35888- atomic_t sample_lost_no_mapping;
35889- atomic_t bt_lost_no_mapping;
35890- atomic_t event_lost_overflow;
35891- atomic_t multiplex_counter;
35892+ atomic_unchecked_t sample_lost_no_mm;
35893+ atomic_unchecked_t sample_lost_no_mapping;
35894+ atomic_unchecked_t bt_lost_no_mapping;
35895+ atomic_unchecked_t event_lost_overflow;
35896+ atomic_unchecked_t multiplex_counter;
35897 };
35898
35899 extern struct oprofile_stat_struct oprofile_stats;
35900diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35901index 849357c..b83c1e0 100644
35902--- a/drivers/oprofile/oprofilefs.c
35903+++ b/drivers/oprofile/oprofilefs.c
35904@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35905
35906
35907 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35908- char const *name, atomic_t *val)
35909+ char const *name, atomic_unchecked_t *val)
35910 {
35911 return __oprofilefs_create_file(sb, root, name,
35912 &atomic_ro_fops, 0444, val);
35913diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35914index 3f56bc0..707d642 100644
35915--- a/drivers/parport/procfs.c
35916+++ b/drivers/parport/procfs.c
35917@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35918
35919 *ppos += len;
35920
35921- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35922+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35923 }
35924
35925 #ifdef CONFIG_PARPORT_1284
35926@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35927
35928 *ppos += len;
35929
35930- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35931+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35932 }
35933 #endif /* IEEE1284.3 support. */
35934
35935diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35936index 9fff878..ad0ad53 100644
35937--- a/drivers/pci/hotplug/cpci_hotplug.h
35938+++ b/drivers/pci/hotplug/cpci_hotplug.h
35939@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35940 int (*hardware_test) (struct slot* slot, u32 value);
35941 u8 (*get_power) (struct slot* slot);
35942 int (*set_power) (struct slot* slot, int value);
35943-};
35944+} __no_const;
35945
35946 struct cpci_hp_controller {
35947 unsigned int irq;
35948diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35949index 76ba8a1..20ca857 100644
35950--- a/drivers/pci/hotplug/cpqphp_nvram.c
35951+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35952@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35953
35954 void compaq_nvram_init (void __iomem *rom_start)
35955 {
35956+
35957+#ifndef CONFIG_PAX_KERNEXEC
35958 if (rom_start) {
35959 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35960 }
35961+#endif
35962+
35963 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35964
35965 /* initialize our int15 lock */
35966diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35967index b500840..d7159d3 100644
35968--- a/drivers/pci/pcie/aspm.c
35969+++ b/drivers/pci/pcie/aspm.c
35970@@ -27,9 +27,9 @@
35971 #define MODULE_PARAM_PREFIX "pcie_aspm."
35972
35973 /* Note: those are not register definitions */
35974-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35975-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35976-#define ASPM_STATE_L1 (4) /* L1 state */
35977+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35978+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35979+#define ASPM_STATE_L1 (4U) /* L1 state */
35980 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35981 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35982
35983diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35984index 5e1ca3c..08082fe 100644
35985--- a/drivers/pci/probe.c
35986+++ b/drivers/pci/probe.c
35987@@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35988 u16 orig_cmd;
35989 struct pci_bus_region region;
35990
35991- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35992+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35993
35994 if (!dev->mmio_always_on) {
35995 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35996diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35997index 27911b5..5b6db88 100644
35998--- a/drivers/pci/proc.c
35999+++ b/drivers/pci/proc.c
36000@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36001 static int __init pci_proc_init(void)
36002 {
36003 struct pci_dev *dev = NULL;
36004+
36005+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36006+#ifdef CONFIG_GRKERNSEC_PROC_USER
36007+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36008+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36009+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36010+#endif
36011+#else
36012 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36013+#endif
36014 proc_create("devices", 0, proc_bus_pci_dir,
36015 &proc_bus_pci_dev_operations);
36016 proc_initialized = 1;
36017diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36018index d68c000..f6094ca 100644
36019--- a/drivers/platform/x86/thinkpad_acpi.c
36020+++ b/drivers/platform/x86/thinkpad_acpi.c
36021@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36022 return 0;
36023 }
36024
36025-void static hotkey_mask_warn_incomplete_mask(void)
36026+static void hotkey_mask_warn_incomplete_mask(void)
36027 {
36028 /* log only what the user can fix... */
36029 const u32 wantedmask = hotkey_driver_mask &
36030@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36031 }
36032 }
36033
36034-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36035- struct tp_nvram_state *newn,
36036- const u32 event_mask)
36037-{
36038-
36039 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36040 do { \
36041 if ((event_mask & (1 << __scancode)) && \
36042@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36043 tpacpi_hotkey_send_key(__scancode); \
36044 } while (0)
36045
36046- void issue_volchange(const unsigned int oldvol,
36047- const unsigned int newvol)
36048- {
36049- unsigned int i = oldvol;
36050+static void issue_volchange(const unsigned int oldvol,
36051+ const unsigned int newvol,
36052+ const u32 event_mask)
36053+{
36054+ unsigned int i = oldvol;
36055
36056- while (i > newvol) {
36057- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36058- i--;
36059- }
36060- while (i < newvol) {
36061- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36062- i++;
36063- }
36064+ while (i > newvol) {
36065+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36066+ i--;
36067 }
36068+ while (i < newvol) {
36069+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36070+ i++;
36071+ }
36072+}
36073
36074- void issue_brightnesschange(const unsigned int oldbrt,
36075- const unsigned int newbrt)
36076- {
36077- unsigned int i = oldbrt;
36078+static void issue_brightnesschange(const unsigned int oldbrt,
36079+ const unsigned int newbrt,
36080+ const u32 event_mask)
36081+{
36082+ unsigned int i = oldbrt;
36083
36084- while (i > newbrt) {
36085- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36086- i--;
36087- }
36088- while (i < newbrt) {
36089- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36090- i++;
36091- }
36092+ while (i > newbrt) {
36093+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36094+ i--;
36095+ }
36096+ while (i < newbrt) {
36097+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36098+ i++;
36099 }
36100+}
36101
36102+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36103+ struct tp_nvram_state *newn,
36104+ const u32 event_mask)
36105+{
36106 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36107 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36108 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36109@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36110 oldn->volume_level != newn->volume_level) {
36111 /* recently muted, or repeated mute keypress, or
36112 * multiple presses ending in mute */
36113- issue_volchange(oldn->volume_level, newn->volume_level);
36114+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36115 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36116 }
36117 } else {
36118@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36119 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36120 }
36121 if (oldn->volume_level != newn->volume_level) {
36122- issue_volchange(oldn->volume_level, newn->volume_level);
36123+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36124 } else if (oldn->volume_toggle != newn->volume_toggle) {
36125 /* repeated vol up/down keypress at end of scale ? */
36126 if (newn->volume_level == 0)
36127@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36128 /* handle brightness */
36129 if (oldn->brightness_level != newn->brightness_level) {
36130 issue_brightnesschange(oldn->brightness_level,
36131- newn->brightness_level);
36132+ newn->brightness_level,
36133+ event_mask);
36134 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36135 /* repeated key presses that didn't change state */
36136 if (newn->brightness_level == 0)
36137@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36138 && !tp_features.bright_unkfw)
36139 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36140 }
36141+}
36142
36143 #undef TPACPI_COMPARE_KEY
36144 #undef TPACPI_MAY_SEND_KEY
36145-}
36146
36147 /*
36148 * Polling driver
36149diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36150index 769d265..a3a05ca 100644
36151--- a/drivers/pnp/pnpbios/bioscalls.c
36152+++ b/drivers/pnp/pnpbios/bioscalls.c
36153@@ -58,7 +58,7 @@ do { \
36154 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36155 } while(0)
36156
36157-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36158+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36159 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36160
36161 /*
36162@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36163
36164 cpu = get_cpu();
36165 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36166+
36167+ pax_open_kernel();
36168 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36169+ pax_close_kernel();
36170
36171 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36172 spin_lock_irqsave(&pnp_bios_lock, flags);
36173@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36174 :"memory");
36175 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36176
36177+ pax_open_kernel();
36178 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36179+ pax_close_kernel();
36180+
36181 put_cpu();
36182
36183 /* If we get here and this is set then the PnP BIOS faulted on us. */
36184@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36185 return status;
36186 }
36187
36188-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36189+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36190 {
36191 int i;
36192
36193@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36194 pnp_bios_callpoint.offset = header->fields.pm16offset;
36195 pnp_bios_callpoint.segment = PNP_CS16;
36196
36197+ pax_open_kernel();
36198+
36199 for_each_possible_cpu(i) {
36200 struct desc_struct *gdt = get_cpu_gdt_table(i);
36201 if (!gdt)
36202@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36203 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36204 (unsigned long)__va(header->fields.pm16dseg));
36205 }
36206+
36207+ pax_close_kernel();
36208 }
36209diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36210index b0ecacb..7c9da2e 100644
36211--- a/drivers/pnp/resource.c
36212+++ b/drivers/pnp/resource.c
36213@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36214 return 1;
36215
36216 /* check if the resource is valid */
36217- if (*irq < 0 || *irq > 15)
36218+ if (*irq > 15)
36219 return 0;
36220
36221 /* check if the resource is reserved */
36222@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36223 return 1;
36224
36225 /* check if the resource is valid */
36226- if (*dma < 0 || *dma == 4 || *dma > 7)
36227+ if (*dma == 4 || *dma > 7)
36228 return 0;
36229
36230 /* check if the resource is reserved */
36231diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36232index 222ccd8..6275fa5 100644
36233--- a/drivers/power/bq27x00_battery.c
36234+++ b/drivers/power/bq27x00_battery.c
36235@@ -72,7 +72,7 @@
36236 struct bq27x00_device_info;
36237 struct bq27x00_access_methods {
36238 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36239-};
36240+} __no_const;
36241
36242 enum bq27x00_chip { BQ27000, BQ27500 };
36243
36244diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36245index 4c5b053..104263e 100644
36246--- a/drivers/regulator/max8660.c
36247+++ b/drivers/regulator/max8660.c
36248@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36249 max8660->shadow_regs[MAX8660_OVER1] = 5;
36250 } else {
36251 /* Otherwise devices can be toggled via software */
36252- max8660_dcdc_ops.enable = max8660_dcdc_enable;
36253- max8660_dcdc_ops.disable = max8660_dcdc_disable;
36254+ pax_open_kernel();
36255+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36256+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36257+ pax_close_kernel();
36258 }
36259
36260 /*
36261diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36262index 845aa22..99ec402 100644
36263--- a/drivers/regulator/mc13892-regulator.c
36264+++ b/drivers/regulator/mc13892-regulator.c
36265@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36266 }
36267 mc13xxx_unlock(mc13892);
36268
36269- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36270+ pax_open_kernel();
36271+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36272 = mc13892_vcam_set_mode;
36273- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36274+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36275 = mc13892_vcam_get_mode;
36276+ pax_close_kernel();
36277
36278 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36279 ARRAY_SIZE(mc13892_regulators));
36280diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36281index cace6d3..f623fda 100644
36282--- a/drivers/rtc/rtc-dev.c
36283+++ b/drivers/rtc/rtc-dev.c
36284@@ -14,6 +14,7 @@
36285 #include <linux/module.h>
36286 #include <linux/rtc.h>
36287 #include <linux/sched.h>
36288+#include <linux/grsecurity.h>
36289 #include "rtc-core.h"
36290
36291 static dev_t rtc_devt;
36292@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36293 if (copy_from_user(&tm, uarg, sizeof(tm)))
36294 return -EFAULT;
36295
36296+ gr_log_timechange();
36297+
36298 return rtc_set_time(rtc, &tm);
36299
36300 case RTC_PIE_ON:
36301diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36302index 3fcf627..f334910 100644
36303--- a/drivers/scsi/aacraid/aacraid.h
36304+++ b/drivers/scsi/aacraid/aacraid.h
36305@@ -492,7 +492,7 @@ struct adapter_ops
36306 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36307 /* Administrative operations */
36308 int (*adapter_comm)(struct aac_dev * dev, int comm);
36309-};
36310+} __no_const;
36311
36312 /*
36313 * Define which interrupt handler needs to be installed
36314diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36315index 0d279c44..3d25a97 100644
36316--- a/drivers/scsi/aacraid/linit.c
36317+++ b/drivers/scsi/aacraid/linit.c
36318@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36319 #elif defined(__devinitconst)
36320 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36321 #else
36322-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36323+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36324 #endif
36325 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36326 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36327diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36328index ff80552..1c4120c 100644
36329--- a/drivers/scsi/aic94xx/aic94xx_init.c
36330+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36331@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36332 .lldd_ata_set_dmamode = asd_set_dmamode,
36333 };
36334
36335-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36336+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36337 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36338 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36339 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36340diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36341index 4ad7e36..d004679 100644
36342--- a/drivers/scsi/bfa/bfa.h
36343+++ b/drivers/scsi/bfa/bfa.h
36344@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36345 u32 *end);
36346 int cpe_vec_q0;
36347 int rme_vec_q0;
36348-};
36349+} __no_const;
36350 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36351
36352 struct bfa_faa_cbfn_s {
36353diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36354index f0f80e2..8ec946b 100644
36355--- a/drivers/scsi/bfa/bfa_fcpim.c
36356+++ b/drivers/scsi/bfa/bfa_fcpim.c
36357@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36358
36359 bfa_iotag_attach(fcp);
36360
36361- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36362+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36363 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36364 (fcp->num_itns * sizeof(struct bfa_itn_s));
36365 memset(fcp->itn_arr, 0,
36366@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36367 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36368 {
36369 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36370- struct bfa_itn_s *itn;
36371+ bfa_itn_s_no_const *itn;
36372
36373 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36374 itn->isr = isr;
36375diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36376index 36f26da..38a34a8 100644
36377--- a/drivers/scsi/bfa/bfa_fcpim.h
36378+++ b/drivers/scsi/bfa/bfa_fcpim.h
36379@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36380 struct bfa_itn_s {
36381 bfa_isr_func_t isr;
36382 };
36383+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36384
36385 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36386 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36387@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36388 struct list_head iotag_tio_free_q; /* free IO resources */
36389 struct list_head iotag_unused_q; /* unused IO resources*/
36390 struct bfa_iotag_s *iotag_arr;
36391- struct bfa_itn_s *itn_arr;
36392+ bfa_itn_s_no_const *itn_arr;
36393 int num_ioim_reqs;
36394 int num_fwtio_reqs;
36395 int num_itns;
36396diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36397index 1a99d4b..e85d64b 100644
36398--- a/drivers/scsi/bfa/bfa_ioc.h
36399+++ b/drivers/scsi/bfa/bfa_ioc.h
36400@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36401 bfa_ioc_disable_cbfn_t disable_cbfn;
36402 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36403 bfa_ioc_reset_cbfn_t reset_cbfn;
36404-};
36405+} __no_const;
36406
36407 /*
36408 * IOC event notification mechanism.
36409@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36410 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36411 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36412 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36413-};
36414+} __no_const;
36415
36416 /*
36417 * Queue element to wait for room in request queue. FIFO order is
36418diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36419index a3a056a..b9bbc2f 100644
36420--- a/drivers/scsi/hosts.c
36421+++ b/drivers/scsi/hosts.c
36422@@ -42,7 +42,7 @@
36423 #include "scsi_logging.h"
36424
36425
36426-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36427+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36428
36429
36430 static void scsi_host_cls_release(struct device *dev)
36431@@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36432 * subtract one because we increment first then return, but we need to
36433 * know what the next host number was before increment
36434 */
36435- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36436+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36437 shost->dma_channel = 0xff;
36438
36439 /* These three are default values which can be overridden */
36440diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36441index 500e20d..ebd3059 100644
36442--- a/drivers/scsi/hpsa.c
36443+++ b/drivers/scsi/hpsa.c
36444@@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36445 u32 a;
36446
36447 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36448- return h->access.command_completed(h);
36449+ return h->access->command_completed(h);
36450
36451 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36452 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36453@@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36454 while (!list_empty(&h->reqQ)) {
36455 c = list_entry(h->reqQ.next, struct CommandList, list);
36456 /* can't do anything if fifo is full */
36457- if ((h->access.fifo_full(h))) {
36458+ if ((h->access->fifo_full(h))) {
36459 dev_warn(&h->pdev->dev, "fifo full\n");
36460 break;
36461 }
36462@@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36463 h->Qdepth--;
36464
36465 /* Tell the controller execute command */
36466- h->access.submit_command(h, c);
36467+ h->access->submit_command(h, c);
36468
36469 /* Put job onto the completed Q */
36470 addQ(&h->cmpQ, c);
36471@@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36472
36473 static inline unsigned long get_next_completion(struct ctlr_info *h)
36474 {
36475- return h->access.command_completed(h);
36476+ return h->access->command_completed(h);
36477 }
36478
36479 static inline bool interrupt_pending(struct ctlr_info *h)
36480 {
36481- return h->access.intr_pending(h);
36482+ return h->access->intr_pending(h);
36483 }
36484
36485 static inline long interrupt_not_for_us(struct ctlr_info *h)
36486 {
36487- return (h->access.intr_pending(h) == 0) ||
36488+ return (h->access->intr_pending(h) == 0) ||
36489 (h->interrupts_enabled == 0);
36490 }
36491
36492@@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36493 if (prod_index < 0)
36494 return -ENODEV;
36495 h->product_name = products[prod_index].product_name;
36496- h->access = *(products[prod_index].access);
36497+ h->access = products[prod_index].access;
36498
36499 if (hpsa_board_disabled(h->pdev)) {
36500 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36501@@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36502
36503 assert_spin_locked(&lockup_detector_lock);
36504 remove_ctlr_from_lockup_detector_list(h);
36505- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36506+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36507 spin_lock_irqsave(&h->lock, flags);
36508 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36509 spin_unlock_irqrestore(&h->lock, flags);
36510@@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36511 }
36512
36513 /* make sure the board interrupts are off */
36514- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36515+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36516
36517 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36518 goto clean2;
36519@@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36520 * fake ones to scoop up any residual completions.
36521 */
36522 spin_lock_irqsave(&h->lock, flags);
36523- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36524+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36525 spin_unlock_irqrestore(&h->lock, flags);
36526 free_irq(h->intr[h->intr_mode], h);
36527 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36528@@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36529 dev_info(&h->pdev->dev, "Board READY.\n");
36530 dev_info(&h->pdev->dev,
36531 "Waiting for stale completions to drain.\n");
36532- h->access.set_intr_mask(h, HPSA_INTR_ON);
36533+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36534 msleep(10000);
36535- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36536+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36537
36538 rc = controller_reset_failed(h->cfgtable);
36539 if (rc)
36540@@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36541 }
36542
36543 /* Turn the interrupts on so we can service requests */
36544- h->access.set_intr_mask(h, HPSA_INTR_ON);
36545+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36546
36547 hpsa_hba_inquiry(h);
36548 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36549@@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36550 * To write all data in the battery backed cache to disks
36551 */
36552 hpsa_flush_cache(h);
36553- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36554+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36555 free_irq(h->intr[h->intr_mode], h);
36556 #ifdef CONFIG_PCI_MSI
36557 if (h->msix_vector)
36558@@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36559 return;
36560 }
36561 /* Change the access methods to the performant access methods */
36562- h->access = SA5_performant_access;
36563+ h->access = &SA5_performant_access;
36564 h->transMethod = CFGTBL_Trans_Performant;
36565 }
36566
36567diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36568index 7b28d54..952f23a 100644
36569--- a/drivers/scsi/hpsa.h
36570+++ b/drivers/scsi/hpsa.h
36571@@ -72,7 +72,7 @@ struct ctlr_info {
36572 unsigned int msix_vector;
36573 unsigned int msi_vector;
36574 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36575- struct access_method access;
36576+ struct access_method *access;
36577
36578 /* queue and queue Info */
36579 struct list_head reqQ;
36580diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36581index f2df059..a3a9930 100644
36582--- a/drivers/scsi/ips.h
36583+++ b/drivers/scsi/ips.h
36584@@ -1027,7 +1027,7 @@ typedef struct {
36585 int (*intr)(struct ips_ha *);
36586 void (*enableint)(struct ips_ha *);
36587 uint32_t (*statupd)(struct ips_ha *);
36588-} ips_hw_func_t;
36589+} __no_const ips_hw_func_t;
36590
36591 typedef struct ips_ha {
36592 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36593diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36594index aceffad..c35c08d 100644
36595--- a/drivers/scsi/libfc/fc_exch.c
36596+++ b/drivers/scsi/libfc/fc_exch.c
36597@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36598 * all together if not used XXX
36599 */
36600 struct {
36601- atomic_t no_free_exch;
36602- atomic_t no_free_exch_xid;
36603- atomic_t xid_not_found;
36604- atomic_t xid_busy;
36605- atomic_t seq_not_found;
36606- atomic_t non_bls_resp;
36607+ atomic_unchecked_t no_free_exch;
36608+ atomic_unchecked_t no_free_exch_xid;
36609+ atomic_unchecked_t xid_not_found;
36610+ atomic_unchecked_t xid_busy;
36611+ atomic_unchecked_t seq_not_found;
36612+ atomic_unchecked_t non_bls_resp;
36613 } stats;
36614 };
36615
36616@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36617 /* allocate memory for exchange */
36618 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36619 if (!ep) {
36620- atomic_inc(&mp->stats.no_free_exch);
36621+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36622 goto out;
36623 }
36624 memset(ep, 0, sizeof(*ep));
36625@@ -780,7 +780,7 @@ out:
36626 return ep;
36627 err:
36628 spin_unlock_bh(&pool->lock);
36629- atomic_inc(&mp->stats.no_free_exch_xid);
36630+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36631 mempool_free(ep, mp->ep_pool);
36632 return NULL;
36633 }
36634@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36635 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36636 ep = fc_exch_find(mp, xid);
36637 if (!ep) {
36638- atomic_inc(&mp->stats.xid_not_found);
36639+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36640 reject = FC_RJT_OX_ID;
36641 goto out;
36642 }
36643@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36644 ep = fc_exch_find(mp, xid);
36645 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36646 if (ep) {
36647- atomic_inc(&mp->stats.xid_busy);
36648+ atomic_inc_unchecked(&mp->stats.xid_busy);
36649 reject = FC_RJT_RX_ID;
36650 goto rel;
36651 }
36652@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36653 }
36654 xid = ep->xid; /* get our XID */
36655 } else if (!ep) {
36656- atomic_inc(&mp->stats.xid_not_found);
36657+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36658 reject = FC_RJT_RX_ID; /* XID not found */
36659 goto out;
36660 }
36661@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36662 } else {
36663 sp = &ep->seq;
36664 if (sp->id != fh->fh_seq_id) {
36665- atomic_inc(&mp->stats.seq_not_found);
36666+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36667 if (f_ctl & FC_FC_END_SEQ) {
36668 /*
36669 * Update sequence_id based on incoming last
36670@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36671
36672 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36673 if (!ep) {
36674- atomic_inc(&mp->stats.xid_not_found);
36675+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36676 goto out;
36677 }
36678 if (ep->esb_stat & ESB_ST_COMPLETE) {
36679- atomic_inc(&mp->stats.xid_not_found);
36680+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36681 goto rel;
36682 }
36683 if (ep->rxid == FC_XID_UNKNOWN)
36684 ep->rxid = ntohs(fh->fh_rx_id);
36685 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36686- atomic_inc(&mp->stats.xid_not_found);
36687+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36688 goto rel;
36689 }
36690 if (ep->did != ntoh24(fh->fh_s_id) &&
36691 ep->did != FC_FID_FLOGI) {
36692- atomic_inc(&mp->stats.xid_not_found);
36693+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36694 goto rel;
36695 }
36696 sof = fr_sof(fp);
36697@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36698 sp->ssb_stat |= SSB_ST_RESP;
36699 sp->id = fh->fh_seq_id;
36700 } else if (sp->id != fh->fh_seq_id) {
36701- atomic_inc(&mp->stats.seq_not_found);
36702+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36703 goto rel;
36704 }
36705
36706@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36707 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36708
36709 if (!sp)
36710- atomic_inc(&mp->stats.xid_not_found);
36711+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36712 else
36713- atomic_inc(&mp->stats.non_bls_resp);
36714+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36715
36716 fc_frame_free(fp);
36717 }
36718diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36719index d109cc3..09f4e7d 100644
36720--- a/drivers/scsi/libsas/sas_ata.c
36721+++ b/drivers/scsi/libsas/sas_ata.c
36722@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36723 .postreset = ata_std_postreset,
36724 .error_handler = ata_std_error_handler,
36725 .post_internal_cmd = sas_ata_post_internal,
36726- .qc_defer = ata_std_qc_defer,
36727+ .qc_defer = ata_std_qc_defer,
36728 .qc_prep = ata_noop_qc_prep,
36729 .qc_issue = sas_ata_qc_issue,
36730 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36731diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36732index 3a1ffdd..8eb7c71 100644
36733--- a/drivers/scsi/lpfc/lpfc.h
36734+++ b/drivers/scsi/lpfc/lpfc.h
36735@@ -413,7 +413,7 @@ struct lpfc_vport {
36736 struct dentry *debug_nodelist;
36737 struct dentry *vport_debugfs_root;
36738 struct lpfc_debugfs_trc *disc_trc;
36739- atomic_t disc_trc_cnt;
36740+ atomic_unchecked_t disc_trc_cnt;
36741 #endif
36742 uint8_t stat_data_enabled;
36743 uint8_t stat_data_blocked;
36744@@ -826,8 +826,8 @@ struct lpfc_hba {
36745 struct timer_list fabric_block_timer;
36746 unsigned long bit_flags;
36747 #define FABRIC_COMANDS_BLOCKED 0
36748- atomic_t num_rsrc_err;
36749- atomic_t num_cmd_success;
36750+ atomic_unchecked_t num_rsrc_err;
36751+ atomic_unchecked_t num_cmd_success;
36752 unsigned long last_rsrc_error_time;
36753 unsigned long last_ramp_down_time;
36754 unsigned long last_ramp_up_time;
36755@@ -863,7 +863,7 @@ struct lpfc_hba {
36756
36757 struct dentry *debug_slow_ring_trc;
36758 struct lpfc_debugfs_trc *slow_ring_trc;
36759- atomic_t slow_ring_trc_cnt;
36760+ atomic_unchecked_t slow_ring_trc_cnt;
36761 /* iDiag debugfs sub-directory */
36762 struct dentry *idiag_root;
36763 struct dentry *idiag_pci_cfg;
36764diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36765index af04b0d..8f1a97e 100644
36766--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36767+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36768@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36769
36770 #include <linux/debugfs.h>
36771
36772-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36773+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36774 static unsigned long lpfc_debugfs_start_time = 0L;
36775
36776 /* iDiag */
36777@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36778 lpfc_debugfs_enable = 0;
36779
36780 len = 0;
36781- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36782+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36783 (lpfc_debugfs_max_disc_trc - 1);
36784 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36785 dtp = vport->disc_trc + i;
36786@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36787 lpfc_debugfs_enable = 0;
36788
36789 len = 0;
36790- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36791+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36792 (lpfc_debugfs_max_slow_ring_trc - 1);
36793 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36794 dtp = phba->slow_ring_trc + i;
36795@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36796 !vport || !vport->disc_trc)
36797 return;
36798
36799- index = atomic_inc_return(&vport->disc_trc_cnt) &
36800+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36801 (lpfc_debugfs_max_disc_trc - 1);
36802 dtp = vport->disc_trc + index;
36803 dtp->fmt = fmt;
36804 dtp->data1 = data1;
36805 dtp->data2 = data2;
36806 dtp->data3 = data3;
36807- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36808+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36809 dtp->jif = jiffies;
36810 #endif
36811 return;
36812@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36813 !phba || !phba->slow_ring_trc)
36814 return;
36815
36816- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36817+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36818 (lpfc_debugfs_max_slow_ring_trc - 1);
36819 dtp = phba->slow_ring_trc + index;
36820 dtp->fmt = fmt;
36821 dtp->data1 = data1;
36822 dtp->data2 = data2;
36823 dtp->data3 = data3;
36824- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36825+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36826 dtp->jif = jiffies;
36827 #endif
36828 return;
36829@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36830 "slow_ring buffer\n");
36831 goto debug_failed;
36832 }
36833- atomic_set(&phba->slow_ring_trc_cnt, 0);
36834+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36835 memset(phba->slow_ring_trc, 0,
36836 (sizeof(struct lpfc_debugfs_trc) *
36837 lpfc_debugfs_max_slow_ring_trc));
36838@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36839 "buffer\n");
36840 goto debug_failed;
36841 }
36842- atomic_set(&vport->disc_trc_cnt, 0);
36843+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36844
36845 snprintf(name, sizeof(name), "discovery_trace");
36846 vport->debug_disc_trc =
36847diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36848index 9598fdc..7e9f3d9 100644
36849--- a/drivers/scsi/lpfc/lpfc_init.c
36850+++ b/drivers/scsi/lpfc/lpfc_init.c
36851@@ -10266,8 +10266,10 @@ lpfc_init(void)
36852 "misc_register returned with status %d", error);
36853
36854 if (lpfc_enable_npiv) {
36855- lpfc_transport_functions.vport_create = lpfc_vport_create;
36856- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36857+ pax_open_kernel();
36858+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36859+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36860+ pax_close_kernel();
36861 }
36862 lpfc_transport_template =
36863 fc_attach_transport(&lpfc_transport_functions);
36864diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36865index 88f3a83..686d3fa 100644
36866--- a/drivers/scsi/lpfc/lpfc_scsi.c
36867+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36868@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36869 uint32_t evt_posted;
36870
36871 spin_lock_irqsave(&phba->hbalock, flags);
36872- atomic_inc(&phba->num_rsrc_err);
36873+ atomic_inc_unchecked(&phba->num_rsrc_err);
36874 phba->last_rsrc_error_time = jiffies;
36875
36876 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36877@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36878 unsigned long flags;
36879 struct lpfc_hba *phba = vport->phba;
36880 uint32_t evt_posted;
36881- atomic_inc(&phba->num_cmd_success);
36882+ atomic_inc_unchecked(&phba->num_cmd_success);
36883
36884 if (vport->cfg_lun_queue_depth <= queue_depth)
36885 return;
36886@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36887 unsigned long num_rsrc_err, num_cmd_success;
36888 int i;
36889
36890- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36891- num_cmd_success = atomic_read(&phba->num_cmd_success);
36892+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36893+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36894
36895 vports = lpfc_create_vport_work_array(phba);
36896 if (vports != NULL)
36897@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36898 }
36899 }
36900 lpfc_destroy_vport_work_array(phba, vports);
36901- atomic_set(&phba->num_rsrc_err, 0);
36902- atomic_set(&phba->num_cmd_success, 0);
36903+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36904+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36905 }
36906
36907 /**
36908@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36909 }
36910 }
36911 lpfc_destroy_vport_work_array(phba, vports);
36912- atomic_set(&phba->num_rsrc_err, 0);
36913- atomic_set(&phba->num_cmd_success, 0);
36914+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36915+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36916 }
36917
36918 /**
36919diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36920index ea8a0b4..812a124 100644
36921--- a/drivers/scsi/pmcraid.c
36922+++ b/drivers/scsi/pmcraid.c
36923@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36924 res->scsi_dev = scsi_dev;
36925 scsi_dev->hostdata = res;
36926 res->change_detected = 0;
36927- atomic_set(&res->read_failures, 0);
36928- atomic_set(&res->write_failures, 0);
36929+ atomic_set_unchecked(&res->read_failures, 0);
36930+ atomic_set_unchecked(&res->write_failures, 0);
36931 rc = 0;
36932 }
36933 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36934@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36935
36936 /* If this was a SCSI read/write command keep count of errors */
36937 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36938- atomic_inc(&res->read_failures);
36939+ atomic_inc_unchecked(&res->read_failures);
36940 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36941- atomic_inc(&res->write_failures);
36942+ atomic_inc_unchecked(&res->write_failures);
36943
36944 if (!RES_IS_GSCSI(res->cfg_entry) &&
36945 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36946@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36947 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36948 * hrrq_id assigned here in queuecommand
36949 */
36950- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36951+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36952 pinstance->num_hrrq;
36953 cmd->cmd_done = pmcraid_io_done;
36954
36955@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36956 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36957 * hrrq_id assigned here in queuecommand
36958 */
36959- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36960+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36961 pinstance->num_hrrq;
36962
36963 if (request_size) {
36964@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36965
36966 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36967 /* add resources only after host is added into system */
36968- if (!atomic_read(&pinstance->expose_resources))
36969+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36970 return;
36971
36972 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36973@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36974 init_waitqueue_head(&pinstance->reset_wait_q);
36975
36976 atomic_set(&pinstance->outstanding_cmds, 0);
36977- atomic_set(&pinstance->last_message_id, 0);
36978- atomic_set(&pinstance->expose_resources, 0);
36979+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36980+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36981
36982 INIT_LIST_HEAD(&pinstance->free_res_q);
36983 INIT_LIST_HEAD(&pinstance->used_res_q);
36984@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36985 /* Schedule worker thread to handle CCN and take care of adding and
36986 * removing devices to OS
36987 */
36988- atomic_set(&pinstance->expose_resources, 1);
36989+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36990 schedule_work(&pinstance->worker_q);
36991 return rc;
36992
36993diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36994index e1d150f..6c6df44 100644
36995--- a/drivers/scsi/pmcraid.h
36996+++ b/drivers/scsi/pmcraid.h
36997@@ -748,7 +748,7 @@ struct pmcraid_instance {
36998 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36999
37000 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37001- atomic_t last_message_id;
37002+ atomic_unchecked_t last_message_id;
37003
37004 /* configuration table */
37005 struct pmcraid_config_table *cfg_table;
37006@@ -777,7 +777,7 @@ struct pmcraid_instance {
37007 atomic_t outstanding_cmds;
37008
37009 /* should add/delete resources to mid-layer now ?*/
37010- atomic_t expose_resources;
37011+ atomic_unchecked_t expose_resources;
37012
37013
37014
37015@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37016 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37017 };
37018 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37019- atomic_t read_failures; /* count of failed READ commands */
37020- atomic_t write_failures; /* count of failed WRITE commands */
37021+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37022+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37023
37024 /* To indicate add/delete/modify during CCN */
37025 u8 change_detected;
37026diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37027index a244303..6015eb7 100644
37028--- a/drivers/scsi/qla2xxx/qla_def.h
37029+++ b/drivers/scsi/qla2xxx/qla_def.h
37030@@ -2264,7 +2264,7 @@ struct isp_operations {
37031 int (*start_scsi) (srb_t *);
37032 int (*abort_isp) (struct scsi_qla_host *);
37033 int (*iospace_config)(struct qla_hw_data*);
37034-};
37035+} __no_const;
37036
37037 /* MSI-X Support *************************************************************/
37038
37039diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37040index 7f2492e..5113877 100644
37041--- a/drivers/scsi/qla4xxx/ql4_def.h
37042+++ b/drivers/scsi/qla4xxx/ql4_def.h
37043@@ -268,7 +268,7 @@ struct ddb_entry {
37044 * (4000 only) */
37045 atomic_t relogin_timer; /* Max Time to wait for
37046 * relogin to complete */
37047- atomic_t relogin_retry_count; /* Num of times relogin has been
37048+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37049 * retried */
37050 uint32_t default_time2wait; /* Default Min time between
37051 * relogins (+aens) */
37052diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37053index ee47820..a83b1f4 100644
37054--- a/drivers/scsi/qla4xxx/ql4_os.c
37055+++ b/drivers/scsi/qla4xxx/ql4_os.c
37056@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37057 */
37058 if (!iscsi_is_session_online(cls_sess)) {
37059 /* Reset retry relogin timer */
37060- atomic_inc(&ddb_entry->relogin_retry_count);
37061+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37062 DEBUG2(ql4_printk(KERN_INFO, ha,
37063 "%s: index[%d] relogin timed out-retrying"
37064 " relogin (%d), retry (%d)\n", __func__,
37065 ddb_entry->fw_ddb_index,
37066- atomic_read(&ddb_entry->relogin_retry_count),
37067+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37068 ddb_entry->default_time2wait + 4));
37069 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37070 atomic_set(&ddb_entry->retry_relogin_timer,
37071@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37072
37073 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37074 atomic_set(&ddb_entry->relogin_timer, 0);
37075- atomic_set(&ddb_entry->relogin_retry_count, 0);
37076+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37077 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37078 ddb_entry->default_relogin_timeout =
37079 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37080diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37081index 07322ec..91ccc23 100644
37082--- a/drivers/scsi/scsi.c
37083+++ b/drivers/scsi/scsi.c
37084@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37085 unsigned long timeout;
37086 int rtn = 0;
37087
37088- atomic_inc(&cmd->device->iorequest_cnt);
37089+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37090
37091 /* check if the device is still usable */
37092 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37093diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37094index 4037fd5..a19fcc7 100644
37095--- a/drivers/scsi/scsi_lib.c
37096+++ b/drivers/scsi/scsi_lib.c
37097@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37098 shost = sdev->host;
37099 scsi_init_cmd_errh(cmd);
37100 cmd->result = DID_NO_CONNECT << 16;
37101- atomic_inc(&cmd->device->iorequest_cnt);
37102+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37103
37104 /*
37105 * SCSI request completion path will do scsi_device_unbusy(),
37106@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
37107
37108 INIT_LIST_HEAD(&cmd->eh_entry);
37109
37110- atomic_inc(&cmd->device->iodone_cnt);
37111+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37112 if (cmd->result)
37113- atomic_inc(&cmd->device->ioerr_cnt);
37114+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37115
37116 disposition = scsi_decide_disposition(cmd);
37117 if (disposition != SUCCESS &&
37118diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37119index 04c2a27..9d8bd66 100644
37120--- a/drivers/scsi/scsi_sysfs.c
37121+++ b/drivers/scsi/scsi_sysfs.c
37122@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37123 char *buf) \
37124 { \
37125 struct scsi_device *sdev = to_scsi_device(dev); \
37126- unsigned long long count = atomic_read(&sdev->field); \
37127+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37128 return snprintf(buf, 20, "0x%llx\n", count); \
37129 } \
37130 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37131diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37132index 84a1fdf..693b0d6 100644
37133--- a/drivers/scsi/scsi_tgt_lib.c
37134+++ b/drivers/scsi/scsi_tgt_lib.c
37135@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37136 int err;
37137
37138 dprintk("%lx %u\n", uaddr, len);
37139- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37140+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37141 if (err) {
37142 /*
37143 * TODO: need to fixup sg_tablesize, max_segment_size,
37144diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37145index 80fbe2a..efa223b 100644
37146--- a/drivers/scsi/scsi_transport_fc.c
37147+++ b/drivers/scsi/scsi_transport_fc.c
37148@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37149 * Netlink Infrastructure
37150 */
37151
37152-static atomic_t fc_event_seq;
37153+static atomic_unchecked_t fc_event_seq;
37154
37155 /**
37156 * fc_get_event_number - Obtain the next sequential FC event number
37157@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37158 u32
37159 fc_get_event_number(void)
37160 {
37161- return atomic_add_return(1, &fc_event_seq);
37162+ return atomic_add_return_unchecked(1, &fc_event_seq);
37163 }
37164 EXPORT_SYMBOL(fc_get_event_number);
37165
37166@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37167 {
37168 int error;
37169
37170- atomic_set(&fc_event_seq, 0);
37171+ atomic_set_unchecked(&fc_event_seq, 0);
37172
37173 error = transport_class_register(&fc_host_class);
37174 if (error)
37175@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37176 char *cp;
37177
37178 *val = simple_strtoul(buf, &cp, 0);
37179- if ((*cp && (*cp != '\n')) || (*val < 0))
37180+ if (*cp && (*cp != '\n'))
37181 return -EINVAL;
37182 /*
37183 * Check for overflow; dev_loss_tmo is u32
37184diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37185index 1cf640e..78e9014 100644
37186--- a/drivers/scsi/scsi_transport_iscsi.c
37187+++ b/drivers/scsi/scsi_transport_iscsi.c
37188@@ -79,7 +79,7 @@ struct iscsi_internal {
37189 struct transport_container session_cont;
37190 };
37191
37192-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37193+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37194 static struct workqueue_struct *iscsi_eh_timer_workq;
37195
37196 static DEFINE_IDA(iscsi_sess_ida);
37197@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37198 int err;
37199
37200 ihost = shost->shost_data;
37201- session->sid = atomic_add_return(1, &iscsi_session_nr);
37202+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37203
37204 if (target_id == ISCSI_MAX_TARGET) {
37205 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37206@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37207 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37208 ISCSI_TRANSPORT_VERSION);
37209
37210- atomic_set(&iscsi_session_nr, 0);
37211+ atomic_set_unchecked(&iscsi_session_nr, 0);
37212
37213 err = class_register(&iscsi_transport_class);
37214 if (err)
37215diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37216index 21a045e..ec89e03 100644
37217--- a/drivers/scsi/scsi_transport_srp.c
37218+++ b/drivers/scsi/scsi_transport_srp.c
37219@@ -33,7 +33,7 @@
37220 #include "scsi_transport_srp_internal.h"
37221
37222 struct srp_host_attrs {
37223- atomic_t next_port_id;
37224+ atomic_unchecked_t next_port_id;
37225 };
37226 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37227
37228@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37229 struct Scsi_Host *shost = dev_to_shost(dev);
37230 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37231
37232- atomic_set(&srp_host->next_port_id, 0);
37233+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37234 return 0;
37235 }
37236
37237@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37238 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37239 rport->roles = ids->roles;
37240
37241- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37242+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37243 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37244
37245 transport_setup_device(&rport->dev);
37246diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37247index eacd46b..e3f4d62 100644
37248--- a/drivers/scsi/sg.c
37249+++ b/drivers/scsi/sg.c
37250@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37251 sdp->disk->disk_name,
37252 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37253 NULL,
37254- (char *)arg);
37255+ (char __user *)arg);
37256 case BLKTRACESTART:
37257 return blk_trace_startstop(sdp->device->request_queue, 1);
37258 case BLKTRACESTOP:
37259@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37260 const struct file_operations * fops;
37261 };
37262
37263-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37264+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37265 {"allow_dio", &adio_fops},
37266 {"debug", &debug_fops},
37267 {"def_reserved_size", &dressz_fops},
37268@@ -2332,7 +2332,7 @@ sg_proc_init(void)
37269 if (!sg_proc_sgp)
37270 return 1;
37271 for (k = 0; k < num_leaves; ++k) {
37272- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37273+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37274 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37275 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37276 }
37277diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37278index 3d8f662..070f1a5 100644
37279--- a/drivers/spi/spi.c
37280+++ b/drivers/spi/spi.c
37281@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
37282 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37283
37284 /* portable code must never pass more than 32 bytes */
37285-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37286+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37287
37288 static u8 *buf;
37289
37290diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37291index d91751f..a3a9e36 100644
37292--- a/drivers/staging/octeon/ethernet-rx.c
37293+++ b/drivers/staging/octeon/ethernet-rx.c
37294@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37295 /* Increment RX stats for virtual ports */
37296 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37297 #ifdef CONFIG_64BIT
37298- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37299- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37300+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37301+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37302 #else
37303- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37304- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37305+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37306+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37307 #endif
37308 }
37309 netif_receive_skb(skb);
37310@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37311 dev->name);
37312 */
37313 #ifdef CONFIG_64BIT
37314- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37315+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37316 #else
37317- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37318+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37319 #endif
37320 dev_kfree_skb_irq(skb);
37321 }
37322diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37323index 60cba81..71eb239 100644
37324--- a/drivers/staging/octeon/ethernet.c
37325+++ b/drivers/staging/octeon/ethernet.c
37326@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37327 * since the RX tasklet also increments it.
37328 */
37329 #ifdef CONFIG_64BIT
37330- atomic64_add(rx_status.dropped_packets,
37331- (atomic64_t *)&priv->stats.rx_dropped);
37332+ atomic64_add_unchecked(rx_status.dropped_packets,
37333+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37334 #else
37335- atomic_add(rx_status.dropped_packets,
37336- (atomic_t *)&priv->stats.rx_dropped);
37337+ atomic_add_unchecked(rx_status.dropped_packets,
37338+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37339 #endif
37340 }
37341
37342diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37343index d3d8727..f9327bb8 100644
37344--- a/drivers/staging/rtl8712/rtl871x_io.h
37345+++ b/drivers/staging/rtl8712/rtl871x_io.h
37346@@ -108,7 +108,7 @@ struct _io_ops {
37347 u8 *pmem);
37348 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37349 u8 *pmem);
37350-};
37351+} __no_const;
37352
37353 struct io_req {
37354 struct list_head list;
37355diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37356index c7b5e8b..783d6cb 100644
37357--- a/drivers/staging/sbe-2t3e3/netdev.c
37358+++ b/drivers/staging/sbe-2t3e3/netdev.c
37359@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37360 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37361
37362 if (rlen)
37363- if (copy_to_user(data, &resp, rlen))
37364+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37365 return -EFAULT;
37366
37367 return 0;
37368diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37369index 42cdafe..2769103 100644
37370--- a/drivers/staging/speakup/speakup_soft.c
37371+++ b/drivers/staging/speakup/speakup_soft.c
37372@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37373 break;
37374 } else if (!initialized) {
37375 if (*init) {
37376- ch = *init;
37377 init++;
37378 } else {
37379 initialized = 1;
37380 }
37381+ ch = *init;
37382 } else {
37383 ch = synth_buffer_getc();
37384 }
37385diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37386index c7b888c..c94be93 100644
37387--- a/drivers/staging/usbip/usbip_common.h
37388+++ b/drivers/staging/usbip/usbip_common.h
37389@@ -289,7 +289,7 @@ struct usbip_device {
37390 void (*shutdown)(struct usbip_device *);
37391 void (*reset)(struct usbip_device *);
37392 void (*unusable)(struct usbip_device *);
37393- } eh_ops;
37394+ } __no_const eh_ops;
37395 };
37396
37397 /* usbip_common.c */
37398diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37399index 88b3298..3783eee 100644
37400--- a/drivers/staging/usbip/vhci.h
37401+++ b/drivers/staging/usbip/vhci.h
37402@@ -88,7 +88,7 @@ struct vhci_hcd {
37403 unsigned resuming:1;
37404 unsigned long re_timeout;
37405
37406- atomic_t seqnum;
37407+ atomic_unchecked_t seqnum;
37408
37409 /*
37410 * NOTE:
37411diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37412index dca9bf1..80735c9 100644
37413--- a/drivers/staging/usbip/vhci_hcd.c
37414+++ b/drivers/staging/usbip/vhci_hcd.c
37415@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37416 return;
37417 }
37418
37419- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37420+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37421 if (priv->seqnum == 0xffff)
37422 dev_info(&urb->dev->dev, "seqnum max\n");
37423
37424@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37425 return -ENOMEM;
37426 }
37427
37428- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37429+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37430 if (unlink->seqnum == 0xffff)
37431 pr_info("seqnum max\n");
37432
37433@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37434 vdev->rhport = rhport;
37435 }
37436
37437- atomic_set(&vhci->seqnum, 0);
37438+ atomic_set_unchecked(&vhci->seqnum, 0);
37439 spin_lock_init(&vhci->lock);
37440
37441 hcd->power_budget = 0; /* no limit */
37442diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37443index f5fba732..210a16c 100644
37444--- a/drivers/staging/usbip/vhci_rx.c
37445+++ b/drivers/staging/usbip/vhci_rx.c
37446@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37447 if (!urb) {
37448 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37449 pr_info("max seqnum %d\n",
37450- atomic_read(&the_controller->seqnum));
37451+ atomic_read_unchecked(&the_controller->seqnum));
37452 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37453 return;
37454 }
37455diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37456index 7735027..30eed13 100644
37457--- a/drivers/staging/vt6655/hostap.c
37458+++ b/drivers/staging/vt6655/hostap.c
37459@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37460 *
37461 */
37462
37463+static net_device_ops_no_const apdev_netdev_ops;
37464+
37465 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37466 {
37467 PSDevice apdev_priv;
37468 struct net_device *dev = pDevice->dev;
37469 int ret;
37470- const struct net_device_ops apdev_netdev_ops = {
37471- .ndo_start_xmit = pDevice->tx_80211,
37472- };
37473
37474 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37475
37476@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37477 *apdev_priv = *pDevice;
37478 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37479
37480+ /* only half broken now */
37481+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37482 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37483
37484 pDevice->apdev->type = ARPHRD_IEEE80211;
37485diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37486index 51b5adf..098e320 100644
37487--- a/drivers/staging/vt6656/hostap.c
37488+++ b/drivers/staging/vt6656/hostap.c
37489@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37490 *
37491 */
37492
37493+static net_device_ops_no_const apdev_netdev_ops;
37494+
37495 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37496 {
37497 PSDevice apdev_priv;
37498 struct net_device *dev = pDevice->dev;
37499 int ret;
37500- const struct net_device_ops apdev_netdev_ops = {
37501- .ndo_start_xmit = pDevice->tx_80211,
37502- };
37503
37504 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37505
37506@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37507 *apdev_priv = *pDevice;
37508 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37509
37510+ /* only half broken now */
37511+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37512 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37513
37514 pDevice->apdev->type = ARPHRD_IEEE80211;
37515diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37516index 7843dfd..3db105f 100644
37517--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37518+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37519@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37520
37521 struct usbctlx_completor {
37522 int (*complete) (struct usbctlx_completor *);
37523-};
37524+} __no_const;
37525
37526 static int
37527 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37528diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37529index 1ca66ea..76f1343 100644
37530--- a/drivers/staging/zcache/tmem.c
37531+++ b/drivers/staging/zcache/tmem.c
37532@@ -39,7 +39,7 @@
37533 * A tmem host implementation must use this function to register callbacks
37534 * for memory allocation.
37535 */
37536-static struct tmem_hostops tmem_hostops;
37537+static tmem_hostops_no_const tmem_hostops;
37538
37539 static void tmem_objnode_tree_init(void);
37540
37541@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37542 * A tmem host implementation must use this function to register
37543 * callbacks for a page-accessible memory (PAM) implementation
37544 */
37545-static struct tmem_pamops tmem_pamops;
37546+static tmem_pamops_no_const tmem_pamops;
37547
37548 void tmem_register_pamops(struct tmem_pamops *m)
37549 {
37550diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37551index 0d4aa82..f7832d4 100644
37552--- a/drivers/staging/zcache/tmem.h
37553+++ b/drivers/staging/zcache/tmem.h
37554@@ -180,6 +180,7 @@ struct tmem_pamops {
37555 void (*new_obj)(struct tmem_obj *);
37556 int (*replace_in_obj)(void *, struct tmem_obj *);
37557 };
37558+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37559 extern void tmem_register_pamops(struct tmem_pamops *m);
37560
37561 /* memory allocation methods provided by the host implementation */
37562@@ -189,6 +190,7 @@ struct tmem_hostops {
37563 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37564 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37565 };
37566+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37567 extern void tmem_register_hostops(struct tmem_hostops *m);
37568
37569 /* core tmem accessor functions */
37570diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37571index 30a6770..fa323f8 100644
37572--- a/drivers/target/target_core_cdb.c
37573+++ b/drivers/target/target_core_cdb.c
37574@@ -1107,7 +1107,7 @@ int target_emulate_write_same(struct se_task *task)
37575 if (num_blocks != 0)
37576 range = num_blocks;
37577 else
37578- range = (dev->transport->get_blocks(dev) - lba);
37579+ range = (dev->transport->get_blocks(dev) - lba) + 1;
37580
37581 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
37582 (unsigned long long)lba, (unsigned long long)range);
37583diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37584index c3148b1..89d10e6 100644
37585--- a/drivers/target/target_core_pr.c
37586+++ b/drivers/target/target_core_pr.c
37587@@ -2038,7 +2038,7 @@ static int __core_scsi3_write_aptpl_to_file(
37588 if (IS_ERR(file) || !file || !file->f_dentry) {
37589 pr_err("filp_open(%s) for APTPL metadata"
37590 " failed\n", path);
37591- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
37592+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
37593 }
37594
37595 iov[0].iov_base = &buf[0];
37596@@ -3826,7 +3826,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37597 " SPC-2 reservation is held, returning"
37598 " RESERVATION_CONFLICT\n");
37599 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
37600- ret = EINVAL;
37601+ ret = -EINVAL;
37602 goto out;
37603 }
37604
37605@@ -3836,7 +3836,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37606 */
37607 if (!cmd->se_sess) {
37608 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
37609- return -EINVAL;
37610+ ret = -EINVAL;
37611+ goto out;
37612 }
37613
37614 if (cmd->data_length < 24) {
37615diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37616index f015839..b15dfc4 100644
37617--- a/drivers/target/target_core_tmr.c
37618+++ b/drivers/target/target_core_tmr.c
37619@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37620 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37621 cmd->t_task_list_num,
37622 atomic_read(&cmd->t_task_cdbs_left),
37623- atomic_read(&cmd->t_task_cdbs_sent),
37624+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37625 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37626 (cmd->transport_state & CMD_T_STOP) != 0,
37627 (cmd->transport_state & CMD_T_SENT) != 0);
37628diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37629index 443704f..92d3517 100644
37630--- a/drivers/target/target_core_transport.c
37631+++ b/drivers/target/target_core_transport.c
37632@@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37633 spin_lock_init(&dev->se_port_lock);
37634 spin_lock_init(&dev->se_tmr_lock);
37635 spin_lock_init(&dev->qf_cmd_lock);
37636- atomic_set(&dev->dev_ordered_id, 0);
37637+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37638
37639 se_dev_set_default_attribs(dev, dev_limits);
37640
37641@@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37642 * Used to determine when ORDERED commands should go from
37643 * Dormant to Active status.
37644 */
37645- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37646+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37647 smp_mb__after_atomic_inc();
37648 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37649 cmd->se_ordered_id, cmd->sam_task_attr,
37650@@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37651 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37652 cmd->t_task_list_num,
37653 atomic_read(&cmd->t_task_cdbs_left),
37654- atomic_read(&cmd->t_task_cdbs_sent),
37655+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37656 atomic_read(&cmd->t_task_cdbs_ex_left),
37657 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37658 (cmd->transport_state & CMD_T_STOP) != 0,
37659@@ -2216,9 +2216,9 @@ check_depth:
37660 cmd = task->task_se_cmd;
37661 spin_lock_irqsave(&cmd->t_state_lock, flags);
37662 task->task_flags |= (TF_ACTIVE | TF_SENT);
37663- atomic_inc(&cmd->t_task_cdbs_sent);
37664+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37665
37666- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37667+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37668 cmd->t_task_list_num)
37669 cmd->transport_state |= CMD_T_SENT;
37670
37671diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
37672index a375f25..da90f64 100644
37673--- a/drivers/target/tcm_fc/tfc_cmd.c
37674+++ b/drivers/target/tcm_fc/tfc_cmd.c
37675@@ -240,6 +240,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
37676 {
37677 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
37678
37679+ if (cmd->aborted)
37680+ return ~0;
37681 return fc_seq_exch(cmd->seq)->rxid;
37682 }
37683
37684diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37685index 3436436..772237b 100644
37686--- a/drivers/tty/hvc/hvcs.c
37687+++ b/drivers/tty/hvc/hvcs.c
37688@@ -83,6 +83,7 @@
37689 #include <asm/hvcserver.h>
37690 #include <asm/uaccess.h>
37691 #include <asm/vio.h>
37692+#include <asm/local.h>
37693
37694 /*
37695 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37696@@ -270,7 +271,7 @@ struct hvcs_struct {
37697 unsigned int index;
37698
37699 struct tty_struct *tty;
37700- int open_count;
37701+ local_t open_count;
37702
37703 /*
37704 * Used to tell the driver kernel_thread what operations need to take
37705@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37706
37707 spin_lock_irqsave(&hvcsd->lock, flags);
37708
37709- if (hvcsd->open_count > 0) {
37710+ if (local_read(&hvcsd->open_count) > 0) {
37711 spin_unlock_irqrestore(&hvcsd->lock, flags);
37712 printk(KERN_INFO "HVCS: vterm state unchanged. "
37713 "The hvcs device node is still in use.\n");
37714@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37715 if ((retval = hvcs_partner_connect(hvcsd)))
37716 goto error_release;
37717
37718- hvcsd->open_count = 1;
37719+ local_set(&hvcsd->open_count, 1);
37720 hvcsd->tty = tty;
37721 tty->driver_data = hvcsd;
37722
37723@@ -1172,7 +1173,7 @@ fast_open:
37724
37725 spin_lock_irqsave(&hvcsd->lock, flags);
37726 kref_get(&hvcsd->kref);
37727- hvcsd->open_count++;
37728+ local_inc(&hvcsd->open_count);
37729 hvcsd->todo_mask |= HVCS_SCHED_READ;
37730 spin_unlock_irqrestore(&hvcsd->lock, flags);
37731
37732@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37733 hvcsd = tty->driver_data;
37734
37735 spin_lock_irqsave(&hvcsd->lock, flags);
37736- if (--hvcsd->open_count == 0) {
37737+ if (local_dec_and_test(&hvcsd->open_count)) {
37738
37739 vio_disable_interrupts(hvcsd->vdev);
37740
37741@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37742 free_irq(irq, hvcsd);
37743 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37744 return;
37745- } else if (hvcsd->open_count < 0) {
37746+ } else if (local_read(&hvcsd->open_count) < 0) {
37747 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37748 " is missmanaged.\n",
37749- hvcsd->vdev->unit_address, hvcsd->open_count);
37750+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37751 }
37752
37753 spin_unlock_irqrestore(&hvcsd->lock, flags);
37754@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37755
37756 spin_lock_irqsave(&hvcsd->lock, flags);
37757 /* Preserve this so that we know how many kref refs to put */
37758- temp_open_count = hvcsd->open_count;
37759+ temp_open_count = local_read(&hvcsd->open_count);
37760
37761 /*
37762 * Don't kref put inside the spinlock because the destruction
37763@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37764 hvcsd->tty->driver_data = NULL;
37765 hvcsd->tty = NULL;
37766
37767- hvcsd->open_count = 0;
37768+ local_set(&hvcsd->open_count, 0);
37769
37770 /* This will drop any buffered data on the floor which is OK in a hangup
37771 * scenario. */
37772@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37773 * the middle of a write operation? This is a crummy place to do this
37774 * but we want to keep it all in the spinlock.
37775 */
37776- if (hvcsd->open_count <= 0) {
37777+ if (local_read(&hvcsd->open_count) <= 0) {
37778 spin_unlock_irqrestore(&hvcsd->lock, flags);
37779 return -ENODEV;
37780 }
37781@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37782 {
37783 struct hvcs_struct *hvcsd = tty->driver_data;
37784
37785- if (!hvcsd || hvcsd->open_count <= 0)
37786+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37787 return 0;
37788
37789 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37790diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37791index 4daf962..b4a2281 100644
37792--- a/drivers/tty/ipwireless/tty.c
37793+++ b/drivers/tty/ipwireless/tty.c
37794@@ -29,6 +29,7 @@
37795 #include <linux/tty_driver.h>
37796 #include <linux/tty_flip.h>
37797 #include <linux/uaccess.h>
37798+#include <asm/local.h>
37799
37800 #include "tty.h"
37801 #include "network.h"
37802@@ -51,7 +52,7 @@ struct ipw_tty {
37803 int tty_type;
37804 struct ipw_network *network;
37805 struct tty_struct *linux_tty;
37806- int open_count;
37807+ local_t open_count;
37808 unsigned int control_lines;
37809 struct mutex ipw_tty_mutex;
37810 int tx_bytes_queued;
37811@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37812 mutex_unlock(&tty->ipw_tty_mutex);
37813 return -ENODEV;
37814 }
37815- if (tty->open_count == 0)
37816+ if (local_read(&tty->open_count) == 0)
37817 tty->tx_bytes_queued = 0;
37818
37819- tty->open_count++;
37820+ local_inc(&tty->open_count);
37821
37822 tty->linux_tty = linux_tty;
37823 linux_tty->driver_data = tty;
37824@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37825
37826 static void do_ipw_close(struct ipw_tty *tty)
37827 {
37828- tty->open_count--;
37829-
37830- if (tty->open_count == 0) {
37831+ if (local_dec_return(&tty->open_count) == 0) {
37832 struct tty_struct *linux_tty = tty->linux_tty;
37833
37834 if (linux_tty != NULL) {
37835@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37836 return;
37837
37838 mutex_lock(&tty->ipw_tty_mutex);
37839- if (tty->open_count == 0) {
37840+ if (local_read(&tty->open_count) == 0) {
37841 mutex_unlock(&tty->ipw_tty_mutex);
37842 return;
37843 }
37844@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37845 return;
37846 }
37847
37848- if (!tty->open_count) {
37849+ if (!local_read(&tty->open_count)) {
37850 mutex_unlock(&tty->ipw_tty_mutex);
37851 return;
37852 }
37853@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37854 return -ENODEV;
37855
37856 mutex_lock(&tty->ipw_tty_mutex);
37857- if (!tty->open_count) {
37858+ if (!local_read(&tty->open_count)) {
37859 mutex_unlock(&tty->ipw_tty_mutex);
37860 return -EINVAL;
37861 }
37862@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37863 if (!tty)
37864 return -ENODEV;
37865
37866- if (!tty->open_count)
37867+ if (!local_read(&tty->open_count))
37868 return -EINVAL;
37869
37870 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37871@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37872 if (!tty)
37873 return 0;
37874
37875- if (!tty->open_count)
37876+ if (!local_read(&tty->open_count))
37877 return 0;
37878
37879 return tty->tx_bytes_queued;
37880@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37881 if (!tty)
37882 return -ENODEV;
37883
37884- if (!tty->open_count)
37885+ if (!local_read(&tty->open_count))
37886 return -EINVAL;
37887
37888 return get_control_lines(tty);
37889@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37890 if (!tty)
37891 return -ENODEV;
37892
37893- if (!tty->open_count)
37894+ if (!local_read(&tty->open_count))
37895 return -EINVAL;
37896
37897 return set_control_lines(tty, set, clear);
37898@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37899 if (!tty)
37900 return -ENODEV;
37901
37902- if (!tty->open_count)
37903+ if (!local_read(&tty->open_count))
37904 return -EINVAL;
37905
37906 /* FIXME: Exactly how is the tty object locked here .. */
37907@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37908 against a parallel ioctl etc */
37909 mutex_lock(&ttyj->ipw_tty_mutex);
37910 }
37911- while (ttyj->open_count)
37912+ while (local_read(&ttyj->open_count))
37913 do_ipw_close(ttyj);
37914 ipwireless_disassociate_network_ttys(network,
37915 ttyj->channel_idx);
37916diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37917index c43b683..0a88f1c 100644
37918--- a/drivers/tty/n_gsm.c
37919+++ b/drivers/tty/n_gsm.c
37920@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37921 kref_init(&dlci->ref);
37922 mutex_init(&dlci->mutex);
37923 dlci->fifo = &dlci->_fifo;
37924- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37925+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37926 kfree(dlci);
37927 return NULL;
37928 }
37929diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37930index 94b6eda..15f7cec 100644
37931--- a/drivers/tty/n_tty.c
37932+++ b/drivers/tty/n_tty.c
37933@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37934 {
37935 *ops = tty_ldisc_N_TTY;
37936 ops->owner = NULL;
37937- ops->refcount = ops->flags = 0;
37938+ atomic_set(&ops->refcount, 0);
37939+ ops->flags = 0;
37940 }
37941 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37942diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37943index eeae7fa..177a743 100644
37944--- a/drivers/tty/pty.c
37945+++ b/drivers/tty/pty.c
37946@@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37947 panic("Couldn't register Unix98 pts driver");
37948
37949 /* Now create the /dev/ptmx special device */
37950+ pax_open_kernel();
37951 tty_default_fops(&ptmx_fops);
37952- ptmx_fops.open = ptmx_open;
37953+ *(void **)&ptmx_fops.open = ptmx_open;
37954+ pax_close_kernel();
37955
37956 cdev_init(&ptmx_cdev, &ptmx_fops);
37957 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37958diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37959index 2b42a01..32a2ed3 100644
37960--- a/drivers/tty/serial/kgdboc.c
37961+++ b/drivers/tty/serial/kgdboc.c
37962@@ -24,8 +24,9 @@
37963 #define MAX_CONFIG_LEN 40
37964
37965 static struct kgdb_io kgdboc_io_ops;
37966+static struct kgdb_io kgdboc_io_ops_console;
37967
37968-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37969+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37970 static int configured = -1;
37971
37972 static char config[MAX_CONFIG_LEN];
37973@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37974 kgdboc_unregister_kbd();
37975 if (configured == 1)
37976 kgdb_unregister_io_module(&kgdboc_io_ops);
37977+ else if (configured == 2)
37978+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
37979 }
37980
37981 static int configure_kgdboc(void)
37982@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37983 int err;
37984 char *cptr = config;
37985 struct console *cons;
37986+ int is_console = 0;
37987
37988 err = kgdboc_option_setup(config);
37989 if (err || !strlen(config) || isspace(config[0]))
37990 goto noconfig;
37991
37992 err = -ENODEV;
37993- kgdboc_io_ops.is_console = 0;
37994 kgdb_tty_driver = NULL;
37995
37996 kgdboc_use_kms = 0;
37997@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37998 int idx;
37999 if (cons->device && cons->device(cons, &idx) == p &&
38000 idx == tty_line) {
38001- kgdboc_io_ops.is_console = 1;
38002+ is_console = 1;
38003 break;
38004 }
38005 cons = cons->next;
38006@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38007 kgdb_tty_line = tty_line;
38008
38009 do_register:
38010- err = kgdb_register_io_module(&kgdboc_io_ops);
38011+ if (is_console) {
38012+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38013+ configured = 2;
38014+ } else {
38015+ err = kgdb_register_io_module(&kgdboc_io_ops);
38016+ configured = 1;
38017+ }
38018 if (err)
38019 goto noconfig;
38020
38021- configured = 1;
38022-
38023 return 0;
38024
38025 noconfig:
38026@@ -213,7 +220,7 @@ noconfig:
38027 static int __init init_kgdboc(void)
38028 {
38029 /* Already configured? */
38030- if (configured == 1)
38031+ if (configured >= 1)
38032 return 0;
38033
38034 return configure_kgdboc();
38035@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38036 if (config[len - 1] == '\n')
38037 config[len - 1] = '\0';
38038
38039- if (configured == 1)
38040+ if (configured >= 1)
38041 cleanup_kgdboc();
38042
38043 /* Go and configure with the new params. */
38044@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38045 .post_exception = kgdboc_post_exp_handler,
38046 };
38047
38048+static struct kgdb_io kgdboc_io_ops_console = {
38049+ .name = "kgdboc",
38050+ .read_char = kgdboc_get_char,
38051+ .write_char = kgdboc_put_char,
38052+ .pre_exception = kgdboc_pre_exp_handler,
38053+ .post_exception = kgdboc_post_exp_handler,
38054+ .is_console = 1
38055+};
38056+
38057 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38058 /* This is only available if kgdboc is a built in for early debugging */
38059 static int __init kgdboc_early_init(char *opt)
38060diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38061index 05728894..b9d44c6 100644
38062--- a/drivers/tty/sysrq.c
38063+++ b/drivers/tty/sysrq.c
38064@@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38065 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38066 size_t count, loff_t *ppos)
38067 {
38068- if (count) {
38069+ if (count && capable(CAP_SYS_ADMIN)) {
38070 char c;
38071
38072 if (get_user(c, buf))
38073diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38074index d939bd7..33d92cd 100644
38075--- a/drivers/tty/tty_io.c
38076+++ b/drivers/tty/tty_io.c
38077@@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38078
38079 void tty_default_fops(struct file_operations *fops)
38080 {
38081- *fops = tty_fops;
38082+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38083 }
38084
38085 /*
38086diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38087index 24b95db..9c078d0 100644
38088--- a/drivers/tty/tty_ldisc.c
38089+++ b/drivers/tty/tty_ldisc.c
38090@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38091 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38092 struct tty_ldisc_ops *ldo = ld->ops;
38093
38094- ldo->refcount--;
38095+ atomic_dec(&ldo->refcount);
38096 module_put(ldo->owner);
38097 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38098
38099@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38100 spin_lock_irqsave(&tty_ldisc_lock, flags);
38101 tty_ldiscs[disc] = new_ldisc;
38102 new_ldisc->num = disc;
38103- new_ldisc->refcount = 0;
38104+ atomic_set(&new_ldisc->refcount, 0);
38105 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38106
38107 return ret;
38108@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38109 return -EINVAL;
38110
38111 spin_lock_irqsave(&tty_ldisc_lock, flags);
38112- if (tty_ldiscs[disc]->refcount)
38113+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38114 ret = -EBUSY;
38115 else
38116 tty_ldiscs[disc] = NULL;
38117@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38118 if (ldops) {
38119 ret = ERR_PTR(-EAGAIN);
38120 if (try_module_get(ldops->owner)) {
38121- ldops->refcount++;
38122+ atomic_inc(&ldops->refcount);
38123 ret = ldops;
38124 }
38125 }
38126@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38127 unsigned long flags;
38128
38129 spin_lock_irqsave(&tty_ldisc_lock, flags);
38130- ldops->refcount--;
38131+ atomic_dec(&ldops->refcount);
38132 module_put(ldops->owner);
38133 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38134 }
38135diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38136index 3b0c4e3..f98a992 100644
38137--- a/drivers/tty/vt/keyboard.c
38138+++ b/drivers/tty/vt/keyboard.c
38139@@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38140 kbd->kbdmode == VC_OFF) &&
38141 value != KVAL(K_SAK))
38142 return; /* SAK is allowed even in raw mode */
38143+
38144+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38145+ {
38146+ void *func = fn_handler[value];
38147+ if (func == fn_show_state || func == fn_show_ptregs ||
38148+ func == fn_show_mem)
38149+ return;
38150+ }
38151+#endif
38152+
38153 fn_handler[value](vc);
38154 }
38155
38156@@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38157 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38158 return -EFAULT;
38159
38160- if (!capable(CAP_SYS_TTY_CONFIG))
38161- perm = 0;
38162-
38163 switch (cmd) {
38164 case KDGKBENT:
38165 /* Ensure another thread doesn't free it under us */
38166@@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38167 spin_unlock_irqrestore(&kbd_event_lock, flags);
38168 return put_user(val, &user_kbe->kb_value);
38169 case KDSKBENT:
38170+ if (!capable(CAP_SYS_TTY_CONFIG))
38171+ perm = 0;
38172+
38173 if (!perm)
38174 return -EPERM;
38175 if (!i && v == K_NOSUCHMAP) {
38176@@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38177 int i, j, k;
38178 int ret;
38179
38180- if (!capable(CAP_SYS_TTY_CONFIG))
38181- perm = 0;
38182-
38183 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38184 if (!kbs) {
38185 ret = -ENOMEM;
38186@@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38187 kfree(kbs);
38188 return ((p && *p) ? -EOVERFLOW : 0);
38189 case KDSKBSENT:
38190+ if (!capable(CAP_SYS_TTY_CONFIG))
38191+ perm = 0;
38192+
38193 if (!perm) {
38194 ret = -EPERM;
38195 goto reterr;
38196diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38197index a783d53..cb30d94 100644
38198--- a/drivers/uio/uio.c
38199+++ b/drivers/uio/uio.c
38200@@ -25,6 +25,7 @@
38201 #include <linux/kobject.h>
38202 #include <linux/cdev.h>
38203 #include <linux/uio_driver.h>
38204+#include <asm/local.h>
38205
38206 #define UIO_MAX_DEVICES (1U << MINORBITS)
38207
38208@@ -32,10 +33,10 @@ struct uio_device {
38209 struct module *owner;
38210 struct device *dev;
38211 int minor;
38212- atomic_t event;
38213+ atomic_unchecked_t event;
38214 struct fasync_struct *async_queue;
38215 wait_queue_head_t wait;
38216- int vma_count;
38217+ local_t vma_count;
38218 struct uio_info *info;
38219 struct kobject *map_dir;
38220 struct kobject *portio_dir;
38221@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38222 struct device_attribute *attr, char *buf)
38223 {
38224 struct uio_device *idev = dev_get_drvdata(dev);
38225- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38226+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38227 }
38228
38229 static struct device_attribute uio_class_attributes[] = {
38230@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38231 {
38232 struct uio_device *idev = info->uio_dev;
38233
38234- atomic_inc(&idev->event);
38235+ atomic_inc_unchecked(&idev->event);
38236 wake_up_interruptible(&idev->wait);
38237 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38238 }
38239@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38240 }
38241
38242 listener->dev = idev;
38243- listener->event_count = atomic_read(&idev->event);
38244+ listener->event_count = atomic_read_unchecked(&idev->event);
38245 filep->private_data = listener;
38246
38247 if (idev->info->open) {
38248@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38249 return -EIO;
38250
38251 poll_wait(filep, &idev->wait, wait);
38252- if (listener->event_count != atomic_read(&idev->event))
38253+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38254 return POLLIN | POLLRDNORM;
38255 return 0;
38256 }
38257@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38258 do {
38259 set_current_state(TASK_INTERRUPTIBLE);
38260
38261- event_count = atomic_read(&idev->event);
38262+ event_count = atomic_read_unchecked(&idev->event);
38263 if (event_count != listener->event_count) {
38264 if (copy_to_user(buf, &event_count, count))
38265 retval = -EFAULT;
38266@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38267 static void uio_vma_open(struct vm_area_struct *vma)
38268 {
38269 struct uio_device *idev = vma->vm_private_data;
38270- idev->vma_count++;
38271+ local_inc(&idev->vma_count);
38272 }
38273
38274 static void uio_vma_close(struct vm_area_struct *vma)
38275 {
38276 struct uio_device *idev = vma->vm_private_data;
38277- idev->vma_count--;
38278+ local_dec(&idev->vma_count);
38279 }
38280
38281 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38282@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38283 idev->owner = owner;
38284 idev->info = info;
38285 init_waitqueue_head(&idev->wait);
38286- atomic_set(&idev->event, 0);
38287+ atomic_set_unchecked(&idev->event, 0);
38288
38289 ret = uio_get_minor(idev);
38290 if (ret)
38291diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38292index 98b89fe..aff824e 100644
38293--- a/drivers/usb/atm/cxacru.c
38294+++ b/drivers/usb/atm/cxacru.c
38295@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38296 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38297 if (ret < 2)
38298 return -EINVAL;
38299- if (index < 0 || index > 0x7f)
38300+ if (index > 0x7f)
38301 return -EINVAL;
38302 pos += tmp;
38303
38304diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38305index d3448ca..d2864ca 100644
38306--- a/drivers/usb/atm/usbatm.c
38307+++ b/drivers/usb/atm/usbatm.c
38308@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38309 if (printk_ratelimit())
38310 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38311 __func__, vpi, vci);
38312- atomic_inc(&vcc->stats->rx_err);
38313+ atomic_inc_unchecked(&vcc->stats->rx_err);
38314 return;
38315 }
38316
38317@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38318 if (length > ATM_MAX_AAL5_PDU) {
38319 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38320 __func__, length, vcc);
38321- atomic_inc(&vcc->stats->rx_err);
38322+ atomic_inc_unchecked(&vcc->stats->rx_err);
38323 goto out;
38324 }
38325
38326@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38327 if (sarb->len < pdu_length) {
38328 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38329 __func__, pdu_length, sarb->len, vcc);
38330- atomic_inc(&vcc->stats->rx_err);
38331+ atomic_inc_unchecked(&vcc->stats->rx_err);
38332 goto out;
38333 }
38334
38335 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38336 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38337 __func__, vcc);
38338- atomic_inc(&vcc->stats->rx_err);
38339+ atomic_inc_unchecked(&vcc->stats->rx_err);
38340 goto out;
38341 }
38342
38343@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38344 if (printk_ratelimit())
38345 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38346 __func__, length);
38347- atomic_inc(&vcc->stats->rx_drop);
38348+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38349 goto out;
38350 }
38351
38352@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38353
38354 vcc->push(vcc, skb);
38355
38356- atomic_inc(&vcc->stats->rx);
38357+ atomic_inc_unchecked(&vcc->stats->rx);
38358 out:
38359 skb_trim(sarb, 0);
38360 }
38361@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38362 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38363
38364 usbatm_pop(vcc, skb);
38365- atomic_inc(&vcc->stats->tx);
38366+ atomic_inc_unchecked(&vcc->stats->tx);
38367
38368 skb = skb_dequeue(&instance->sndqueue);
38369 }
38370@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38371 if (!left--)
38372 return sprintf(page,
38373 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38374- atomic_read(&atm_dev->stats.aal5.tx),
38375- atomic_read(&atm_dev->stats.aal5.tx_err),
38376- atomic_read(&atm_dev->stats.aal5.rx),
38377- atomic_read(&atm_dev->stats.aal5.rx_err),
38378- atomic_read(&atm_dev->stats.aal5.rx_drop));
38379+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38380+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38381+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38382+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38383+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38384
38385 if (!left--) {
38386 if (instance->disconnected)
38387diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38388index d956965..4179a77 100644
38389--- a/drivers/usb/core/devices.c
38390+++ b/drivers/usb/core/devices.c
38391@@ -126,7 +126,7 @@ static const char format_endpt[] =
38392 * time it gets called.
38393 */
38394 static struct device_connect_event {
38395- atomic_t count;
38396+ atomic_unchecked_t count;
38397 wait_queue_head_t wait;
38398 } device_event = {
38399 .count = ATOMIC_INIT(1),
38400@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38401
38402 void usbfs_conn_disc_event(void)
38403 {
38404- atomic_add(2, &device_event.count);
38405+ atomic_add_unchecked(2, &device_event.count);
38406 wake_up(&device_event.wait);
38407 }
38408
38409@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38410
38411 poll_wait(file, &device_event.wait, wait);
38412
38413- event_count = atomic_read(&device_event.count);
38414+ event_count = atomic_read_unchecked(&device_event.count);
38415 if (file->f_version != event_count) {
38416 file->f_version = event_count;
38417 return POLLIN | POLLRDNORM;
38418diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38419index 1fc8f12..20647c1 100644
38420--- a/drivers/usb/early/ehci-dbgp.c
38421+++ b/drivers/usb/early/ehci-dbgp.c
38422@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38423
38424 #ifdef CONFIG_KGDB
38425 static struct kgdb_io kgdbdbgp_io_ops;
38426-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38427+static struct kgdb_io kgdbdbgp_io_ops_console;
38428+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38429 #else
38430 #define dbgp_kgdb_mode (0)
38431 #endif
38432@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38433 .write_char = kgdbdbgp_write_char,
38434 };
38435
38436+static struct kgdb_io kgdbdbgp_io_ops_console = {
38437+ .name = "kgdbdbgp",
38438+ .read_char = kgdbdbgp_read_char,
38439+ .write_char = kgdbdbgp_write_char,
38440+ .is_console = 1
38441+};
38442+
38443 static int kgdbdbgp_wait_time;
38444
38445 static int __init kgdbdbgp_parse_config(char *str)
38446@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38447 ptr++;
38448 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38449 }
38450- kgdb_register_io_module(&kgdbdbgp_io_ops);
38451- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38452+ if (early_dbgp_console.index != -1)
38453+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38454+ else
38455+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38456
38457 return 0;
38458 }
38459diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38460index d6bea3e..60b250e 100644
38461--- a/drivers/usb/wusbcore/wa-hc.h
38462+++ b/drivers/usb/wusbcore/wa-hc.h
38463@@ -192,7 +192,7 @@ struct wahc {
38464 struct list_head xfer_delayed_list;
38465 spinlock_t xfer_list_lock;
38466 struct work_struct xfer_work;
38467- atomic_t xfer_id_count;
38468+ atomic_unchecked_t xfer_id_count;
38469 };
38470
38471
38472@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38473 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38474 spin_lock_init(&wa->xfer_list_lock);
38475 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38476- atomic_set(&wa->xfer_id_count, 1);
38477+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38478 }
38479
38480 /**
38481diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38482index 57c01ab..8a05959 100644
38483--- a/drivers/usb/wusbcore/wa-xfer.c
38484+++ b/drivers/usb/wusbcore/wa-xfer.c
38485@@ -296,7 +296,7 @@ out:
38486 */
38487 static void wa_xfer_id_init(struct wa_xfer *xfer)
38488 {
38489- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38490+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38491 }
38492
38493 /*
38494diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38495index 51e4c1e..9d87e2a 100644
38496--- a/drivers/vhost/vhost.c
38497+++ b/drivers/vhost/vhost.c
38498@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38499 return 0;
38500 }
38501
38502-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38503+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38504 {
38505 struct file *eventfp, *filep = NULL,
38506 *pollstart = NULL, *pollstop = NULL;
38507diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38508index b0b2ac3..89a4399 100644
38509--- a/drivers/video/aty/aty128fb.c
38510+++ b/drivers/video/aty/aty128fb.c
38511@@ -148,7 +148,7 @@ enum {
38512 };
38513
38514 /* Must match above enum */
38515-static const char *r128_family[] __devinitdata = {
38516+static const char *r128_family[] __devinitconst = {
38517 "AGP",
38518 "PCI",
38519 "PRO AGP",
38520diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38521index 5c3960d..15cf8fc 100644
38522--- a/drivers/video/fbcmap.c
38523+++ b/drivers/video/fbcmap.c
38524@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38525 rc = -ENODEV;
38526 goto out;
38527 }
38528- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38529- !info->fbops->fb_setcmap)) {
38530+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38531 rc = -EINVAL;
38532 goto out1;
38533 }
38534diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38535index c6ce416..3b9b642 100644
38536--- a/drivers/video/fbmem.c
38537+++ b/drivers/video/fbmem.c
38538@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38539 image->dx += image->width + 8;
38540 }
38541 } else if (rotate == FB_ROTATE_UD) {
38542- for (x = 0; x < num && image->dx >= 0; x++) {
38543+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38544 info->fbops->fb_imageblit(info, image);
38545 image->dx -= image->width + 8;
38546 }
38547@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38548 image->dy += image->height + 8;
38549 }
38550 } else if (rotate == FB_ROTATE_CCW) {
38551- for (x = 0; x < num && image->dy >= 0; x++) {
38552+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38553 info->fbops->fb_imageblit(info, image);
38554 image->dy -= image->height + 8;
38555 }
38556@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38557 return -EFAULT;
38558 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38559 return -EINVAL;
38560- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38561+ if (con2fb.framebuffer >= FB_MAX)
38562 return -EINVAL;
38563 if (!registered_fb[con2fb.framebuffer])
38564 request_module("fb%d", con2fb.framebuffer);
38565diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38566index 5a5d092..265c5ed 100644
38567--- a/drivers/video/geode/gx1fb_core.c
38568+++ b/drivers/video/geode/gx1fb_core.c
38569@@ -29,7 +29,7 @@ static int crt_option = 1;
38570 static char panel_option[32] = "";
38571
38572 /* Modes relevant to the GX1 (taken from modedb.c) */
38573-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38574+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38575 /* 640x480-60 VESA */
38576 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38577 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38578diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38579index 0fad23f..0e9afa4 100644
38580--- a/drivers/video/gxt4500.c
38581+++ b/drivers/video/gxt4500.c
38582@@ -156,7 +156,7 @@ struct gxt4500_par {
38583 static char *mode_option;
38584
38585 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38586-static const struct fb_videomode defaultmode __devinitdata = {
38587+static const struct fb_videomode defaultmode __devinitconst = {
38588 .refresh = 60,
38589 .xres = 1280,
38590 .yres = 1024,
38591@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38592 return 0;
38593 }
38594
38595-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38596+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38597 .id = "IBM GXT4500P",
38598 .type = FB_TYPE_PACKED_PIXELS,
38599 .visual = FB_VISUAL_PSEUDOCOLOR,
38600diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38601index 7672d2e..b56437f 100644
38602--- a/drivers/video/i810/i810_accel.c
38603+++ b/drivers/video/i810/i810_accel.c
38604@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38605 }
38606 }
38607 printk("ringbuffer lockup!!!\n");
38608+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38609 i810_report_error(mmio);
38610 par->dev_flags |= LOCKUP;
38611 info->pixmap.scan_align = 1;
38612diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38613index b83f361..2b05a91 100644
38614--- a/drivers/video/i810/i810_main.c
38615+++ b/drivers/video/i810/i810_main.c
38616@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38617 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38618
38619 /* PCI */
38620-static const char *i810_pci_list[] __devinitdata = {
38621+static const char *i810_pci_list[] __devinitconst = {
38622 "Intel(R) 810 Framebuffer Device" ,
38623 "Intel(R) 810-DC100 Framebuffer Device" ,
38624 "Intel(R) 810E Framebuffer Device" ,
38625diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38626index de36693..3c63fc2 100644
38627--- a/drivers/video/jz4740_fb.c
38628+++ b/drivers/video/jz4740_fb.c
38629@@ -136,7 +136,7 @@ struct jzfb {
38630 uint32_t pseudo_palette[16];
38631 };
38632
38633-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38634+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38635 .id = "JZ4740 FB",
38636 .type = FB_TYPE_PACKED_PIXELS,
38637 .visual = FB_VISUAL_TRUECOLOR,
38638diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38639index 3c14e43..eafa544 100644
38640--- a/drivers/video/logo/logo_linux_clut224.ppm
38641+++ b/drivers/video/logo/logo_linux_clut224.ppm
38642@@ -1,1604 +1,1123 @@
38643 P3
38644-# Standard 224-color Linux logo
38645 80 80
38646 255
38647- 0 0 0 0 0 0 0 0 0 0 0 0
38648- 0 0 0 0 0 0 0 0 0 0 0 0
38649- 0 0 0 0 0 0 0 0 0 0 0 0
38650- 0 0 0 0 0 0 0 0 0 0 0 0
38651- 0 0 0 0 0 0 0 0 0 0 0 0
38652- 0 0 0 0 0 0 0 0 0 0 0 0
38653- 0 0 0 0 0 0 0 0 0 0 0 0
38654- 0 0 0 0 0 0 0 0 0 0 0 0
38655- 0 0 0 0 0 0 0 0 0 0 0 0
38656- 6 6 6 6 6 6 10 10 10 10 10 10
38657- 10 10 10 6 6 6 6 6 6 6 6 6
38658- 0 0 0 0 0 0 0 0 0 0 0 0
38659- 0 0 0 0 0 0 0 0 0 0 0 0
38660- 0 0 0 0 0 0 0 0 0 0 0 0
38661- 0 0 0 0 0 0 0 0 0 0 0 0
38662- 0 0 0 0 0 0 0 0 0 0 0 0
38663- 0 0 0 0 0 0 0 0 0 0 0 0
38664- 0 0 0 0 0 0 0 0 0 0 0 0
38665- 0 0 0 0 0 0 0 0 0 0 0 0
38666- 0 0 0 0 0 0 0 0 0 0 0 0
38667- 0 0 0 0 0 0 0 0 0 0 0 0
38668- 0 0 0 0 0 0 0 0 0 0 0 0
38669- 0 0 0 0 0 0 0 0 0 0 0 0
38670- 0 0 0 0 0 0 0 0 0 0 0 0
38671- 0 0 0 0 0 0 0 0 0 0 0 0
38672- 0 0 0 0 0 0 0 0 0 0 0 0
38673- 0 0 0 0 0 0 0 0 0 0 0 0
38674- 0 0 0 0 0 0 0 0 0 0 0 0
38675- 0 0 0 6 6 6 10 10 10 14 14 14
38676- 22 22 22 26 26 26 30 30 30 34 34 34
38677- 30 30 30 30 30 30 26 26 26 18 18 18
38678- 14 14 14 10 10 10 6 6 6 0 0 0
38679- 0 0 0 0 0 0 0 0 0 0 0 0
38680- 0 0 0 0 0 0 0 0 0 0 0 0
38681- 0 0 0 0 0 0 0 0 0 0 0 0
38682- 0 0 0 0 0 0 0 0 0 0 0 0
38683- 0 0 0 0 0 0 0 0 0 0 0 0
38684- 0 0 0 0 0 0 0 0 0 0 0 0
38685- 0 0 0 0 0 0 0 0 0 0 0 0
38686- 0 0 0 0 0 0 0 0 0 0 0 0
38687- 0 0 0 0 0 0 0 0 0 0 0 0
38688- 0 0 0 0 0 1 0 0 1 0 0 0
38689- 0 0 0 0 0 0 0 0 0 0 0 0
38690- 0 0 0 0 0 0 0 0 0 0 0 0
38691- 0 0 0 0 0 0 0 0 0 0 0 0
38692- 0 0 0 0 0 0 0 0 0 0 0 0
38693- 0 0 0 0 0 0 0 0 0 0 0 0
38694- 0 0 0 0 0 0 0 0 0 0 0 0
38695- 6 6 6 14 14 14 26 26 26 42 42 42
38696- 54 54 54 66 66 66 78 78 78 78 78 78
38697- 78 78 78 74 74 74 66 66 66 54 54 54
38698- 42 42 42 26 26 26 18 18 18 10 10 10
38699- 6 6 6 0 0 0 0 0 0 0 0 0
38700- 0 0 0 0 0 0 0 0 0 0 0 0
38701- 0 0 0 0 0 0 0 0 0 0 0 0
38702- 0 0 0 0 0 0 0 0 0 0 0 0
38703- 0 0 0 0 0 0 0 0 0 0 0 0
38704- 0 0 0 0 0 0 0 0 0 0 0 0
38705- 0 0 0 0 0 0 0 0 0 0 0 0
38706- 0 0 0 0 0 0 0 0 0 0 0 0
38707- 0 0 0 0 0 0 0 0 0 0 0 0
38708- 0 0 1 0 0 0 0 0 0 0 0 0
38709- 0 0 0 0 0 0 0 0 0 0 0 0
38710- 0 0 0 0 0 0 0 0 0 0 0 0
38711- 0 0 0 0 0 0 0 0 0 0 0 0
38712- 0 0 0 0 0 0 0 0 0 0 0 0
38713- 0 0 0 0 0 0 0 0 0 0 0 0
38714- 0 0 0 0 0 0 0 0 0 10 10 10
38715- 22 22 22 42 42 42 66 66 66 86 86 86
38716- 66 66 66 38 38 38 38 38 38 22 22 22
38717- 26 26 26 34 34 34 54 54 54 66 66 66
38718- 86 86 86 70 70 70 46 46 46 26 26 26
38719- 14 14 14 6 6 6 0 0 0 0 0 0
38720- 0 0 0 0 0 0 0 0 0 0 0 0
38721- 0 0 0 0 0 0 0 0 0 0 0 0
38722- 0 0 0 0 0 0 0 0 0 0 0 0
38723- 0 0 0 0 0 0 0 0 0 0 0 0
38724- 0 0 0 0 0 0 0 0 0 0 0 0
38725- 0 0 0 0 0 0 0 0 0 0 0 0
38726- 0 0 0 0 0 0 0 0 0 0 0 0
38727- 0 0 0 0 0 0 0 0 0 0 0 0
38728- 0 0 1 0 0 1 0 0 1 0 0 0
38729- 0 0 0 0 0 0 0 0 0 0 0 0
38730- 0 0 0 0 0 0 0 0 0 0 0 0
38731- 0 0 0 0 0 0 0 0 0 0 0 0
38732- 0 0 0 0 0 0 0 0 0 0 0 0
38733- 0 0 0 0 0 0 0 0 0 0 0 0
38734- 0 0 0 0 0 0 10 10 10 26 26 26
38735- 50 50 50 82 82 82 58 58 58 6 6 6
38736- 2 2 6 2 2 6 2 2 6 2 2 6
38737- 2 2 6 2 2 6 2 2 6 2 2 6
38738- 6 6 6 54 54 54 86 86 86 66 66 66
38739- 38 38 38 18 18 18 6 6 6 0 0 0
38740- 0 0 0 0 0 0 0 0 0 0 0 0
38741- 0 0 0 0 0 0 0 0 0 0 0 0
38742- 0 0 0 0 0 0 0 0 0 0 0 0
38743- 0 0 0 0 0 0 0 0 0 0 0 0
38744- 0 0 0 0 0 0 0 0 0 0 0 0
38745- 0 0 0 0 0 0 0 0 0 0 0 0
38746- 0 0 0 0 0 0 0 0 0 0 0 0
38747- 0 0 0 0 0 0 0 0 0 0 0 0
38748- 0 0 0 0 0 0 0 0 0 0 0 0
38749- 0 0 0 0 0 0 0 0 0 0 0 0
38750- 0 0 0 0 0 0 0 0 0 0 0 0
38751- 0 0 0 0 0 0 0 0 0 0 0 0
38752- 0 0 0 0 0 0 0 0 0 0 0 0
38753- 0 0 0 0 0 0 0 0 0 0 0 0
38754- 0 0 0 6 6 6 22 22 22 50 50 50
38755- 78 78 78 34 34 34 2 2 6 2 2 6
38756- 2 2 6 2 2 6 2 2 6 2 2 6
38757- 2 2 6 2 2 6 2 2 6 2 2 6
38758- 2 2 6 2 2 6 6 6 6 70 70 70
38759- 78 78 78 46 46 46 22 22 22 6 6 6
38760- 0 0 0 0 0 0 0 0 0 0 0 0
38761- 0 0 0 0 0 0 0 0 0 0 0 0
38762- 0 0 0 0 0 0 0 0 0 0 0 0
38763- 0 0 0 0 0 0 0 0 0 0 0 0
38764- 0 0 0 0 0 0 0 0 0 0 0 0
38765- 0 0 0 0 0 0 0 0 0 0 0 0
38766- 0 0 0 0 0 0 0 0 0 0 0 0
38767- 0 0 0 0 0 0 0 0 0 0 0 0
38768- 0 0 1 0 0 1 0 0 1 0 0 0
38769- 0 0 0 0 0 0 0 0 0 0 0 0
38770- 0 0 0 0 0 0 0 0 0 0 0 0
38771- 0 0 0 0 0 0 0 0 0 0 0 0
38772- 0 0 0 0 0 0 0 0 0 0 0 0
38773- 0 0 0 0 0 0 0 0 0 0 0 0
38774- 6 6 6 18 18 18 42 42 42 82 82 82
38775- 26 26 26 2 2 6 2 2 6 2 2 6
38776- 2 2 6 2 2 6 2 2 6 2 2 6
38777- 2 2 6 2 2 6 2 2 6 14 14 14
38778- 46 46 46 34 34 34 6 6 6 2 2 6
38779- 42 42 42 78 78 78 42 42 42 18 18 18
38780- 6 6 6 0 0 0 0 0 0 0 0 0
38781- 0 0 0 0 0 0 0 0 0 0 0 0
38782- 0 0 0 0 0 0 0 0 0 0 0 0
38783- 0 0 0 0 0 0 0 0 0 0 0 0
38784- 0 0 0 0 0 0 0 0 0 0 0 0
38785- 0 0 0 0 0 0 0 0 0 0 0 0
38786- 0 0 0 0 0 0 0 0 0 0 0 0
38787- 0 0 0 0 0 0 0 0 0 0 0 0
38788- 0 0 1 0 0 0 0 0 1 0 0 0
38789- 0 0 0 0 0 0 0 0 0 0 0 0
38790- 0 0 0 0 0 0 0 0 0 0 0 0
38791- 0 0 0 0 0 0 0 0 0 0 0 0
38792- 0 0 0 0 0 0 0 0 0 0 0 0
38793- 0 0 0 0 0 0 0 0 0 0 0 0
38794- 10 10 10 30 30 30 66 66 66 58 58 58
38795- 2 2 6 2 2 6 2 2 6 2 2 6
38796- 2 2 6 2 2 6 2 2 6 2 2 6
38797- 2 2 6 2 2 6 2 2 6 26 26 26
38798- 86 86 86 101 101 101 46 46 46 10 10 10
38799- 2 2 6 58 58 58 70 70 70 34 34 34
38800- 10 10 10 0 0 0 0 0 0 0 0 0
38801- 0 0 0 0 0 0 0 0 0 0 0 0
38802- 0 0 0 0 0 0 0 0 0 0 0 0
38803- 0 0 0 0 0 0 0 0 0 0 0 0
38804- 0 0 0 0 0 0 0 0 0 0 0 0
38805- 0 0 0 0 0 0 0 0 0 0 0 0
38806- 0 0 0 0 0 0 0 0 0 0 0 0
38807- 0 0 0 0 0 0 0 0 0 0 0 0
38808- 0 0 1 0 0 1 0 0 1 0 0 0
38809- 0 0 0 0 0 0 0 0 0 0 0 0
38810- 0 0 0 0 0 0 0 0 0 0 0 0
38811- 0 0 0 0 0 0 0 0 0 0 0 0
38812- 0 0 0 0 0 0 0 0 0 0 0 0
38813- 0 0 0 0 0 0 0 0 0 0 0 0
38814- 14 14 14 42 42 42 86 86 86 10 10 10
38815- 2 2 6 2 2 6 2 2 6 2 2 6
38816- 2 2 6 2 2 6 2 2 6 2 2 6
38817- 2 2 6 2 2 6 2 2 6 30 30 30
38818- 94 94 94 94 94 94 58 58 58 26 26 26
38819- 2 2 6 6 6 6 78 78 78 54 54 54
38820- 22 22 22 6 6 6 0 0 0 0 0 0
38821- 0 0 0 0 0 0 0 0 0 0 0 0
38822- 0 0 0 0 0 0 0 0 0 0 0 0
38823- 0 0 0 0 0 0 0 0 0 0 0 0
38824- 0 0 0 0 0 0 0 0 0 0 0 0
38825- 0 0 0 0 0 0 0 0 0 0 0 0
38826- 0 0 0 0 0 0 0 0 0 0 0 0
38827- 0 0 0 0 0 0 0 0 0 0 0 0
38828- 0 0 0 0 0 0 0 0 0 0 0 0
38829- 0 0 0 0 0 0 0 0 0 0 0 0
38830- 0 0 0 0 0 0 0 0 0 0 0 0
38831- 0 0 0 0 0 0 0 0 0 0 0 0
38832- 0 0 0 0 0 0 0 0 0 0 0 0
38833- 0 0 0 0 0 0 0 0 0 6 6 6
38834- 22 22 22 62 62 62 62 62 62 2 2 6
38835- 2 2 6 2 2 6 2 2 6 2 2 6
38836- 2 2 6 2 2 6 2 2 6 2 2 6
38837- 2 2 6 2 2 6 2 2 6 26 26 26
38838- 54 54 54 38 38 38 18 18 18 10 10 10
38839- 2 2 6 2 2 6 34 34 34 82 82 82
38840- 38 38 38 14 14 14 0 0 0 0 0 0
38841- 0 0 0 0 0 0 0 0 0 0 0 0
38842- 0 0 0 0 0 0 0 0 0 0 0 0
38843- 0 0 0 0 0 0 0 0 0 0 0 0
38844- 0 0 0 0 0 0 0 0 0 0 0 0
38845- 0 0 0 0 0 0 0 0 0 0 0 0
38846- 0 0 0 0 0 0 0 0 0 0 0 0
38847- 0 0 0 0 0 0 0 0 0 0 0 0
38848- 0 0 0 0 0 1 0 0 1 0 0 0
38849- 0 0 0 0 0 0 0 0 0 0 0 0
38850- 0 0 0 0 0 0 0 0 0 0 0 0
38851- 0 0 0 0 0 0 0 0 0 0 0 0
38852- 0 0 0 0 0 0 0 0 0 0 0 0
38853- 0 0 0 0 0 0 0 0 0 6 6 6
38854- 30 30 30 78 78 78 30 30 30 2 2 6
38855- 2 2 6 2 2 6 2 2 6 2 2 6
38856- 2 2 6 2 2 6 2 2 6 2 2 6
38857- 2 2 6 2 2 6 2 2 6 10 10 10
38858- 10 10 10 2 2 6 2 2 6 2 2 6
38859- 2 2 6 2 2 6 2 2 6 78 78 78
38860- 50 50 50 18 18 18 6 6 6 0 0 0
38861- 0 0 0 0 0 0 0 0 0 0 0 0
38862- 0 0 0 0 0 0 0 0 0 0 0 0
38863- 0 0 0 0 0 0 0 0 0 0 0 0
38864- 0 0 0 0 0 0 0 0 0 0 0 0
38865- 0 0 0 0 0 0 0 0 0 0 0 0
38866- 0 0 0 0 0 0 0 0 0 0 0 0
38867- 0 0 0 0 0 0 0 0 0 0 0 0
38868- 0 0 1 0 0 0 0 0 0 0 0 0
38869- 0 0 0 0 0 0 0 0 0 0 0 0
38870- 0 0 0 0 0 0 0 0 0 0 0 0
38871- 0 0 0 0 0 0 0 0 0 0 0 0
38872- 0 0 0 0 0 0 0 0 0 0 0 0
38873- 0 0 0 0 0 0 0 0 0 10 10 10
38874- 38 38 38 86 86 86 14 14 14 2 2 6
38875- 2 2 6 2 2 6 2 2 6 2 2 6
38876- 2 2 6 2 2 6 2 2 6 2 2 6
38877- 2 2 6 2 2 6 2 2 6 2 2 6
38878- 2 2 6 2 2 6 2 2 6 2 2 6
38879- 2 2 6 2 2 6 2 2 6 54 54 54
38880- 66 66 66 26 26 26 6 6 6 0 0 0
38881- 0 0 0 0 0 0 0 0 0 0 0 0
38882- 0 0 0 0 0 0 0 0 0 0 0 0
38883- 0 0 0 0 0 0 0 0 0 0 0 0
38884- 0 0 0 0 0 0 0 0 0 0 0 0
38885- 0 0 0 0 0 0 0 0 0 0 0 0
38886- 0 0 0 0 0 0 0 0 0 0 0 0
38887- 0 0 0 0 0 0 0 0 0 0 0 0
38888- 0 0 0 0 0 1 0 0 1 0 0 0
38889- 0 0 0 0 0 0 0 0 0 0 0 0
38890- 0 0 0 0 0 0 0 0 0 0 0 0
38891- 0 0 0 0 0 0 0 0 0 0 0 0
38892- 0 0 0 0 0 0 0 0 0 0 0 0
38893- 0 0 0 0 0 0 0 0 0 14 14 14
38894- 42 42 42 82 82 82 2 2 6 2 2 6
38895- 2 2 6 6 6 6 10 10 10 2 2 6
38896- 2 2 6 2 2 6 2 2 6 2 2 6
38897- 2 2 6 2 2 6 2 2 6 6 6 6
38898- 14 14 14 10 10 10 2 2 6 2 2 6
38899- 2 2 6 2 2 6 2 2 6 18 18 18
38900- 82 82 82 34 34 34 10 10 10 0 0 0
38901- 0 0 0 0 0 0 0 0 0 0 0 0
38902- 0 0 0 0 0 0 0 0 0 0 0 0
38903- 0 0 0 0 0 0 0 0 0 0 0 0
38904- 0 0 0 0 0 0 0 0 0 0 0 0
38905- 0 0 0 0 0 0 0 0 0 0 0 0
38906- 0 0 0 0 0 0 0 0 0 0 0 0
38907- 0 0 0 0 0 0 0 0 0 0 0 0
38908- 0 0 1 0 0 0 0 0 0 0 0 0
38909- 0 0 0 0 0 0 0 0 0 0 0 0
38910- 0 0 0 0 0 0 0 0 0 0 0 0
38911- 0 0 0 0 0 0 0 0 0 0 0 0
38912- 0 0 0 0 0 0 0 0 0 0 0 0
38913- 0 0 0 0 0 0 0 0 0 14 14 14
38914- 46 46 46 86 86 86 2 2 6 2 2 6
38915- 6 6 6 6 6 6 22 22 22 34 34 34
38916- 6 6 6 2 2 6 2 2 6 2 2 6
38917- 2 2 6 2 2 6 18 18 18 34 34 34
38918- 10 10 10 50 50 50 22 22 22 2 2 6
38919- 2 2 6 2 2 6 2 2 6 10 10 10
38920- 86 86 86 42 42 42 14 14 14 0 0 0
38921- 0 0 0 0 0 0 0 0 0 0 0 0
38922- 0 0 0 0 0 0 0 0 0 0 0 0
38923- 0 0 0 0 0 0 0 0 0 0 0 0
38924- 0 0 0 0 0 0 0 0 0 0 0 0
38925- 0 0 0 0 0 0 0 0 0 0 0 0
38926- 0 0 0 0 0 0 0 0 0 0 0 0
38927- 0 0 0 0 0 0 0 0 0 0 0 0
38928- 0 0 1 0 0 1 0 0 1 0 0 0
38929- 0 0 0 0 0 0 0 0 0 0 0 0
38930- 0 0 0 0 0 0 0 0 0 0 0 0
38931- 0 0 0 0 0 0 0 0 0 0 0 0
38932- 0 0 0 0 0 0 0 0 0 0 0 0
38933- 0 0 0 0 0 0 0 0 0 14 14 14
38934- 46 46 46 86 86 86 2 2 6 2 2 6
38935- 38 38 38 116 116 116 94 94 94 22 22 22
38936- 22 22 22 2 2 6 2 2 6 2 2 6
38937- 14 14 14 86 86 86 138 138 138 162 162 162
38938-154 154 154 38 38 38 26 26 26 6 6 6
38939- 2 2 6 2 2 6 2 2 6 2 2 6
38940- 86 86 86 46 46 46 14 14 14 0 0 0
38941- 0 0 0 0 0 0 0 0 0 0 0 0
38942- 0 0 0 0 0 0 0 0 0 0 0 0
38943- 0 0 0 0 0 0 0 0 0 0 0 0
38944- 0 0 0 0 0 0 0 0 0 0 0 0
38945- 0 0 0 0 0 0 0 0 0 0 0 0
38946- 0 0 0 0 0 0 0 0 0 0 0 0
38947- 0 0 0 0 0 0 0 0 0 0 0 0
38948- 0 0 0 0 0 0 0 0 0 0 0 0
38949- 0 0 0 0 0 0 0 0 0 0 0 0
38950- 0 0 0 0 0 0 0 0 0 0 0 0
38951- 0 0 0 0 0 0 0 0 0 0 0 0
38952- 0 0 0 0 0 0 0 0 0 0 0 0
38953- 0 0 0 0 0 0 0 0 0 14 14 14
38954- 46 46 46 86 86 86 2 2 6 14 14 14
38955-134 134 134 198 198 198 195 195 195 116 116 116
38956- 10 10 10 2 2 6 2 2 6 6 6 6
38957-101 98 89 187 187 187 210 210 210 218 218 218
38958-214 214 214 134 134 134 14 14 14 6 6 6
38959- 2 2 6 2 2 6 2 2 6 2 2 6
38960- 86 86 86 50 50 50 18 18 18 6 6 6
38961- 0 0 0 0 0 0 0 0 0 0 0 0
38962- 0 0 0 0 0 0 0 0 0 0 0 0
38963- 0 0 0 0 0 0 0 0 0 0 0 0
38964- 0 0 0 0 0 0 0 0 0 0 0 0
38965- 0 0 0 0 0 0 0 0 0 0 0 0
38966- 0 0 0 0 0 0 0 0 0 0 0 0
38967- 0 0 0 0 0 0 0 0 1 0 0 0
38968- 0 0 1 0 0 1 0 0 1 0 0 0
38969- 0 0 0 0 0 0 0 0 0 0 0 0
38970- 0 0 0 0 0 0 0 0 0 0 0 0
38971- 0 0 0 0 0 0 0 0 0 0 0 0
38972- 0 0 0 0 0 0 0 0 0 0 0 0
38973- 0 0 0 0 0 0 0 0 0 14 14 14
38974- 46 46 46 86 86 86 2 2 6 54 54 54
38975-218 218 218 195 195 195 226 226 226 246 246 246
38976- 58 58 58 2 2 6 2 2 6 30 30 30
38977-210 210 210 253 253 253 174 174 174 123 123 123
38978-221 221 221 234 234 234 74 74 74 2 2 6
38979- 2 2 6 2 2 6 2 2 6 2 2 6
38980- 70 70 70 58 58 58 22 22 22 6 6 6
38981- 0 0 0 0 0 0 0 0 0 0 0 0
38982- 0 0 0 0 0 0 0 0 0 0 0 0
38983- 0 0 0 0 0 0 0 0 0 0 0 0
38984- 0 0 0 0 0 0 0 0 0 0 0 0
38985- 0 0 0 0 0 0 0 0 0 0 0 0
38986- 0 0 0 0 0 0 0 0 0 0 0 0
38987- 0 0 0 0 0 0 0 0 0 0 0 0
38988- 0 0 0 0 0 0 0 0 0 0 0 0
38989- 0 0 0 0 0 0 0 0 0 0 0 0
38990- 0 0 0 0 0 0 0 0 0 0 0 0
38991- 0 0 0 0 0 0 0 0 0 0 0 0
38992- 0 0 0 0 0 0 0 0 0 0 0 0
38993- 0 0 0 0 0 0 0 0 0 14 14 14
38994- 46 46 46 82 82 82 2 2 6 106 106 106
38995-170 170 170 26 26 26 86 86 86 226 226 226
38996-123 123 123 10 10 10 14 14 14 46 46 46
38997-231 231 231 190 190 190 6 6 6 70 70 70
38998- 90 90 90 238 238 238 158 158 158 2 2 6
38999- 2 2 6 2 2 6 2 2 6 2 2 6
39000- 70 70 70 58 58 58 22 22 22 6 6 6
39001- 0 0 0 0 0 0 0 0 0 0 0 0
39002- 0 0 0 0 0 0 0 0 0 0 0 0
39003- 0 0 0 0 0 0 0 0 0 0 0 0
39004- 0 0 0 0 0 0 0 0 0 0 0 0
39005- 0 0 0 0 0 0 0 0 0 0 0 0
39006- 0 0 0 0 0 0 0 0 0 0 0 0
39007- 0 0 0 0 0 0 0 0 1 0 0 0
39008- 0 0 1 0 0 1 0 0 1 0 0 0
39009- 0 0 0 0 0 0 0 0 0 0 0 0
39010- 0 0 0 0 0 0 0 0 0 0 0 0
39011- 0 0 0 0 0 0 0 0 0 0 0 0
39012- 0 0 0 0 0 0 0 0 0 0 0 0
39013- 0 0 0 0 0 0 0 0 0 14 14 14
39014- 42 42 42 86 86 86 6 6 6 116 116 116
39015-106 106 106 6 6 6 70 70 70 149 149 149
39016-128 128 128 18 18 18 38 38 38 54 54 54
39017-221 221 221 106 106 106 2 2 6 14 14 14
39018- 46 46 46 190 190 190 198 198 198 2 2 6
39019- 2 2 6 2 2 6 2 2 6 2 2 6
39020- 74 74 74 62 62 62 22 22 22 6 6 6
39021- 0 0 0 0 0 0 0 0 0 0 0 0
39022- 0 0 0 0 0 0 0 0 0 0 0 0
39023- 0 0 0 0 0 0 0 0 0 0 0 0
39024- 0 0 0 0 0 0 0 0 0 0 0 0
39025- 0 0 0 0 0 0 0 0 0 0 0 0
39026- 0 0 0 0 0 0 0 0 0 0 0 0
39027- 0 0 0 0 0 0 0 0 1 0 0 0
39028- 0 0 1 0 0 0 0 0 1 0 0 0
39029- 0 0 0 0 0 0 0 0 0 0 0 0
39030- 0 0 0 0 0 0 0 0 0 0 0 0
39031- 0 0 0 0 0 0 0 0 0 0 0 0
39032- 0 0 0 0 0 0 0 0 0 0 0 0
39033- 0 0 0 0 0 0 0 0 0 14 14 14
39034- 42 42 42 94 94 94 14 14 14 101 101 101
39035-128 128 128 2 2 6 18 18 18 116 116 116
39036-118 98 46 121 92 8 121 92 8 98 78 10
39037-162 162 162 106 106 106 2 2 6 2 2 6
39038- 2 2 6 195 195 195 195 195 195 6 6 6
39039- 2 2 6 2 2 6 2 2 6 2 2 6
39040- 74 74 74 62 62 62 22 22 22 6 6 6
39041- 0 0 0 0 0 0 0 0 0 0 0 0
39042- 0 0 0 0 0 0 0 0 0 0 0 0
39043- 0 0 0 0 0 0 0 0 0 0 0 0
39044- 0 0 0 0 0 0 0 0 0 0 0 0
39045- 0 0 0 0 0 0 0 0 0 0 0 0
39046- 0 0 0 0 0 0 0 0 0 0 0 0
39047- 0 0 0 0 0 0 0 0 1 0 0 1
39048- 0 0 1 0 0 0 0 0 1 0 0 0
39049- 0 0 0 0 0 0 0 0 0 0 0 0
39050- 0 0 0 0 0 0 0 0 0 0 0 0
39051- 0 0 0 0 0 0 0 0 0 0 0 0
39052- 0 0 0 0 0 0 0 0 0 0 0 0
39053- 0 0 0 0 0 0 0 0 0 10 10 10
39054- 38 38 38 90 90 90 14 14 14 58 58 58
39055-210 210 210 26 26 26 54 38 6 154 114 10
39056-226 170 11 236 186 11 225 175 15 184 144 12
39057-215 174 15 175 146 61 37 26 9 2 2 6
39058- 70 70 70 246 246 246 138 138 138 2 2 6
39059- 2 2 6 2 2 6 2 2 6 2 2 6
39060- 70 70 70 66 66 66 26 26 26 6 6 6
39061- 0 0 0 0 0 0 0 0 0 0 0 0
39062- 0 0 0 0 0 0 0 0 0 0 0 0
39063- 0 0 0 0 0 0 0 0 0 0 0 0
39064- 0 0 0 0 0 0 0 0 0 0 0 0
39065- 0 0 0 0 0 0 0 0 0 0 0 0
39066- 0 0 0 0 0 0 0 0 0 0 0 0
39067- 0 0 0 0 0 0 0 0 0 0 0 0
39068- 0 0 0 0 0 0 0 0 0 0 0 0
39069- 0 0 0 0 0 0 0 0 0 0 0 0
39070- 0 0 0 0 0 0 0 0 0 0 0 0
39071- 0 0 0 0 0 0 0 0 0 0 0 0
39072- 0 0 0 0 0 0 0 0 0 0 0 0
39073- 0 0 0 0 0 0 0 0 0 10 10 10
39074- 38 38 38 86 86 86 14 14 14 10 10 10
39075-195 195 195 188 164 115 192 133 9 225 175 15
39076-239 182 13 234 190 10 232 195 16 232 200 30
39077-245 207 45 241 208 19 232 195 16 184 144 12
39078-218 194 134 211 206 186 42 42 42 2 2 6
39079- 2 2 6 2 2 6 2 2 6 2 2 6
39080- 50 50 50 74 74 74 30 30 30 6 6 6
39081- 0 0 0 0 0 0 0 0 0 0 0 0
39082- 0 0 0 0 0 0 0 0 0 0 0 0
39083- 0 0 0 0 0 0 0 0 0 0 0 0
39084- 0 0 0 0 0 0 0 0 0 0 0 0
39085- 0 0 0 0 0 0 0 0 0 0 0 0
39086- 0 0 0 0 0 0 0 0 0 0 0 0
39087- 0 0 0 0 0 0 0 0 0 0 0 0
39088- 0 0 0 0 0 0 0 0 0 0 0 0
39089- 0 0 0 0 0 0 0 0 0 0 0 0
39090- 0 0 0 0 0 0 0 0 0 0 0 0
39091- 0 0 0 0 0 0 0 0 0 0 0 0
39092- 0 0 0 0 0 0 0 0 0 0 0 0
39093- 0 0 0 0 0 0 0 0 0 10 10 10
39094- 34 34 34 86 86 86 14 14 14 2 2 6
39095-121 87 25 192 133 9 219 162 10 239 182 13
39096-236 186 11 232 195 16 241 208 19 244 214 54
39097-246 218 60 246 218 38 246 215 20 241 208 19
39098-241 208 19 226 184 13 121 87 25 2 2 6
39099- 2 2 6 2 2 6 2 2 6 2 2 6
39100- 50 50 50 82 82 82 34 34 34 10 10 10
39101- 0 0 0 0 0 0 0 0 0 0 0 0
39102- 0 0 0 0 0 0 0 0 0 0 0 0
39103- 0 0 0 0 0 0 0 0 0 0 0 0
39104- 0 0 0 0 0 0 0 0 0 0 0 0
39105- 0 0 0 0 0 0 0 0 0 0 0 0
39106- 0 0 0 0 0 0 0 0 0 0 0 0
39107- 0 0 0 0 0 0 0 0 0 0 0 0
39108- 0 0 0 0 0 0 0 0 0 0 0 0
39109- 0 0 0 0 0 0 0 0 0 0 0 0
39110- 0 0 0 0 0 0 0 0 0 0 0 0
39111- 0 0 0 0 0 0 0 0 0 0 0 0
39112- 0 0 0 0 0 0 0 0 0 0 0 0
39113- 0 0 0 0 0 0 0 0 0 10 10 10
39114- 34 34 34 82 82 82 30 30 30 61 42 6
39115-180 123 7 206 145 10 230 174 11 239 182 13
39116-234 190 10 238 202 15 241 208 19 246 218 74
39117-246 218 38 246 215 20 246 215 20 246 215 20
39118-226 184 13 215 174 15 184 144 12 6 6 6
39119- 2 2 6 2 2 6 2 2 6 2 2 6
39120- 26 26 26 94 94 94 42 42 42 14 14 14
39121- 0 0 0 0 0 0 0 0 0 0 0 0
39122- 0 0 0 0 0 0 0 0 0 0 0 0
39123- 0 0 0 0 0 0 0 0 0 0 0 0
39124- 0 0 0 0 0 0 0 0 0 0 0 0
39125- 0 0 0 0 0 0 0 0 0 0 0 0
39126- 0 0 0 0 0 0 0 0 0 0 0 0
39127- 0 0 0 0 0 0 0 0 0 0 0 0
39128- 0 0 0 0 0 0 0 0 0 0 0 0
39129- 0 0 0 0 0 0 0 0 0 0 0 0
39130- 0 0 0 0 0 0 0 0 0 0 0 0
39131- 0 0 0 0 0 0 0 0 0 0 0 0
39132- 0 0 0 0 0 0 0 0 0 0 0 0
39133- 0 0 0 0 0 0 0 0 0 10 10 10
39134- 30 30 30 78 78 78 50 50 50 104 69 6
39135-192 133 9 216 158 10 236 178 12 236 186 11
39136-232 195 16 241 208 19 244 214 54 245 215 43
39137-246 215 20 246 215 20 241 208 19 198 155 10
39138-200 144 11 216 158 10 156 118 10 2 2 6
39139- 2 2 6 2 2 6 2 2 6 2 2 6
39140- 6 6 6 90 90 90 54 54 54 18 18 18
39141- 6 6 6 0 0 0 0 0 0 0 0 0
39142- 0 0 0 0 0 0 0 0 0 0 0 0
39143- 0 0 0 0 0 0 0 0 0 0 0 0
39144- 0 0 0 0 0 0 0 0 0 0 0 0
39145- 0 0 0 0 0 0 0 0 0 0 0 0
39146- 0 0 0 0 0 0 0 0 0 0 0 0
39147- 0 0 0 0 0 0 0 0 0 0 0 0
39148- 0 0 0 0 0 0 0 0 0 0 0 0
39149- 0 0 0 0 0 0 0 0 0 0 0 0
39150- 0 0 0 0 0 0 0 0 0 0 0 0
39151- 0 0 0 0 0 0 0 0 0 0 0 0
39152- 0 0 0 0 0 0 0 0 0 0 0 0
39153- 0 0 0 0 0 0 0 0 0 10 10 10
39154- 30 30 30 78 78 78 46 46 46 22 22 22
39155-137 92 6 210 162 10 239 182 13 238 190 10
39156-238 202 15 241 208 19 246 215 20 246 215 20
39157-241 208 19 203 166 17 185 133 11 210 150 10
39158-216 158 10 210 150 10 102 78 10 2 2 6
39159- 6 6 6 54 54 54 14 14 14 2 2 6
39160- 2 2 6 62 62 62 74 74 74 30 30 30
39161- 10 10 10 0 0 0 0 0 0 0 0 0
39162- 0 0 0 0 0 0 0 0 0 0 0 0
39163- 0 0 0 0 0 0 0 0 0 0 0 0
39164- 0 0 0 0 0 0 0 0 0 0 0 0
39165- 0 0 0 0 0 0 0 0 0 0 0 0
39166- 0 0 0 0 0 0 0 0 0 0 0 0
39167- 0 0 0 0 0 0 0 0 0 0 0 0
39168- 0 0 0 0 0 0 0 0 0 0 0 0
39169- 0 0 0 0 0 0 0 0 0 0 0 0
39170- 0 0 0 0 0 0 0 0 0 0 0 0
39171- 0 0 0 0 0 0 0 0 0 0 0 0
39172- 0 0 0 0 0 0 0 0 0 0 0 0
39173- 0 0 0 0 0 0 0 0 0 10 10 10
39174- 34 34 34 78 78 78 50 50 50 6 6 6
39175- 94 70 30 139 102 15 190 146 13 226 184 13
39176-232 200 30 232 195 16 215 174 15 190 146 13
39177-168 122 10 192 133 9 210 150 10 213 154 11
39178-202 150 34 182 157 106 101 98 89 2 2 6
39179- 2 2 6 78 78 78 116 116 116 58 58 58
39180- 2 2 6 22 22 22 90 90 90 46 46 46
39181- 18 18 18 6 6 6 0 0 0 0 0 0
39182- 0 0 0 0 0 0 0 0 0 0 0 0
39183- 0 0 0 0 0 0 0 0 0 0 0 0
39184- 0 0 0 0 0 0 0 0 0 0 0 0
39185- 0 0 0 0 0 0 0 0 0 0 0 0
39186- 0 0 0 0 0 0 0 0 0 0 0 0
39187- 0 0 0 0 0 0 0 0 0 0 0 0
39188- 0 0 0 0 0 0 0 0 0 0 0 0
39189- 0 0 0 0 0 0 0 0 0 0 0 0
39190- 0 0 0 0 0 0 0 0 0 0 0 0
39191- 0 0 0 0 0 0 0 0 0 0 0 0
39192- 0 0 0 0 0 0 0 0 0 0 0 0
39193- 0 0 0 0 0 0 0 0 0 10 10 10
39194- 38 38 38 86 86 86 50 50 50 6 6 6
39195-128 128 128 174 154 114 156 107 11 168 122 10
39196-198 155 10 184 144 12 197 138 11 200 144 11
39197-206 145 10 206 145 10 197 138 11 188 164 115
39198-195 195 195 198 198 198 174 174 174 14 14 14
39199- 2 2 6 22 22 22 116 116 116 116 116 116
39200- 22 22 22 2 2 6 74 74 74 70 70 70
39201- 30 30 30 10 10 10 0 0 0 0 0 0
39202- 0 0 0 0 0 0 0 0 0 0 0 0
39203- 0 0 0 0 0 0 0 0 0 0 0 0
39204- 0 0 0 0 0 0 0 0 0 0 0 0
39205- 0 0 0 0 0 0 0 0 0 0 0 0
39206- 0 0 0 0 0 0 0 0 0 0 0 0
39207- 0 0 0 0 0 0 0 0 0 0 0 0
39208- 0 0 0 0 0 0 0 0 0 0 0 0
39209- 0 0 0 0 0 0 0 0 0 0 0 0
39210- 0 0 0 0 0 0 0 0 0 0 0 0
39211- 0 0 0 0 0 0 0 0 0 0 0 0
39212- 0 0 0 0 0 0 0 0 0 0 0 0
39213- 0 0 0 0 0 0 6 6 6 18 18 18
39214- 50 50 50 101 101 101 26 26 26 10 10 10
39215-138 138 138 190 190 190 174 154 114 156 107 11
39216-197 138 11 200 144 11 197 138 11 192 133 9
39217-180 123 7 190 142 34 190 178 144 187 187 187
39218-202 202 202 221 221 221 214 214 214 66 66 66
39219- 2 2 6 2 2 6 50 50 50 62 62 62
39220- 6 6 6 2 2 6 10 10 10 90 90 90
39221- 50 50 50 18 18 18 6 6 6 0 0 0
39222- 0 0 0 0 0 0 0 0 0 0 0 0
39223- 0 0 0 0 0 0 0 0 0 0 0 0
39224- 0 0 0 0 0 0 0 0 0 0 0 0
39225- 0 0 0 0 0 0 0 0 0 0 0 0
39226- 0 0 0 0 0 0 0 0 0 0 0 0
39227- 0 0 0 0 0 0 0 0 0 0 0 0
39228- 0 0 0 0 0 0 0 0 0 0 0 0
39229- 0 0 0 0 0 0 0 0 0 0 0 0
39230- 0 0 0 0 0 0 0 0 0 0 0 0
39231- 0 0 0 0 0 0 0 0 0 0 0 0
39232- 0 0 0 0 0 0 0 0 0 0 0 0
39233- 0 0 0 0 0 0 10 10 10 34 34 34
39234- 74 74 74 74 74 74 2 2 6 6 6 6
39235-144 144 144 198 198 198 190 190 190 178 166 146
39236-154 121 60 156 107 11 156 107 11 168 124 44
39237-174 154 114 187 187 187 190 190 190 210 210 210
39238-246 246 246 253 253 253 253 253 253 182 182 182
39239- 6 6 6 2 2 6 2 2 6 2 2 6
39240- 2 2 6 2 2 6 2 2 6 62 62 62
39241- 74 74 74 34 34 34 14 14 14 0 0 0
39242- 0 0 0 0 0 0 0 0 0 0 0 0
39243- 0 0 0 0 0 0 0 0 0 0 0 0
39244- 0 0 0 0 0 0 0 0 0 0 0 0
39245- 0 0 0 0 0 0 0 0 0 0 0 0
39246- 0 0 0 0 0 0 0 0 0 0 0 0
39247- 0 0 0 0 0 0 0 0 0 0 0 0
39248- 0 0 0 0 0 0 0 0 0 0 0 0
39249- 0 0 0 0 0 0 0 0 0 0 0 0
39250- 0 0 0 0 0 0 0 0 0 0 0 0
39251- 0 0 0 0 0 0 0 0 0 0 0 0
39252- 0 0 0 0 0 0 0 0 0 0 0 0
39253- 0 0 0 10 10 10 22 22 22 54 54 54
39254- 94 94 94 18 18 18 2 2 6 46 46 46
39255-234 234 234 221 221 221 190 190 190 190 190 190
39256-190 190 190 187 187 187 187 187 187 190 190 190
39257-190 190 190 195 195 195 214 214 214 242 242 242
39258-253 253 253 253 253 253 253 253 253 253 253 253
39259- 82 82 82 2 2 6 2 2 6 2 2 6
39260- 2 2 6 2 2 6 2 2 6 14 14 14
39261- 86 86 86 54 54 54 22 22 22 6 6 6
39262- 0 0 0 0 0 0 0 0 0 0 0 0
39263- 0 0 0 0 0 0 0 0 0 0 0 0
39264- 0 0 0 0 0 0 0 0 0 0 0 0
39265- 0 0 0 0 0 0 0 0 0 0 0 0
39266- 0 0 0 0 0 0 0 0 0 0 0 0
39267- 0 0 0 0 0 0 0 0 0 0 0 0
39268- 0 0 0 0 0 0 0 0 0 0 0 0
39269- 0 0 0 0 0 0 0 0 0 0 0 0
39270- 0 0 0 0 0 0 0 0 0 0 0 0
39271- 0 0 0 0 0 0 0 0 0 0 0 0
39272- 0 0 0 0 0 0 0 0 0 0 0 0
39273- 6 6 6 18 18 18 46 46 46 90 90 90
39274- 46 46 46 18 18 18 6 6 6 182 182 182
39275-253 253 253 246 246 246 206 206 206 190 190 190
39276-190 190 190 190 190 190 190 190 190 190 190 190
39277-206 206 206 231 231 231 250 250 250 253 253 253
39278-253 253 253 253 253 253 253 253 253 253 253 253
39279-202 202 202 14 14 14 2 2 6 2 2 6
39280- 2 2 6 2 2 6 2 2 6 2 2 6
39281- 42 42 42 86 86 86 42 42 42 18 18 18
39282- 6 6 6 0 0 0 0 0 0 0 0 0
39283- 0 0 0 0 0 0 0 0 0 0 0 0
39284- 0 0 0 0 0 0 0 0 0 0 0 0
39285- 0 0 0 0 0 0 0 0 0 0 0 0
39286- 0 0 0 0 0 0 0 0 0 0 0 0
39287- 0 0 0 0 0 0 0 0 0 0 0 0
39288- 0 0 0 0 0 0 0 0 0 0 0 0
39289- 0 0 0 0 0 0 0 0 0 0 0 0
39290- 0 0 0 0 0 0 0 0 0 0 0 0
39291- 0 0 0 0 0 0 0 0 0 0 0 0
39292- 0 0 0 0 0 0 0 0 0 6 6 6
39293- 14 14 14 38 38 38 74 74 74 66 66 66
39294- 2 2 6 6 6 6 90 90 90 250 250 250
39295-253 253 253 253 253 253 238 238 238 198 198 198
39296-190 190 190 190 190 190 195 195 195 221 221 221
39297-246 246 246 253 253 253 253 253 253 253 253 253
39298-253 253 253 253 253 253 253 253 253 253 253 253
39299-253 253 253 82 82 82 2 2 6 2 2 6
39300- 2 2 6 2 2 6 2 2 6 2 2 6
39301- 2 2 6 78 78 78 70 70 70 34 34 34
39302- 14 14 14 6 6 6 0 0 0 0 0 0
39303- 0 0 0 0 0 0 0 0 0 0 0 0
39304- 0 0 0 0 0 0 0 0 0 0 0 0
39305- 0 0 0 0 0 0 0 0 0 0 0 0
39306- 0 0 0 0 0 0 0 0 0 0 0 0
39307- 0 0 0 0 0 0 0 0 0 0 0 0
39308- 0 0 0 0 0 0 0 0 0 0 0 0
39309- 0 0 0 0 0 0 0 0 0 0 0 0
39310- 0 0 0 0 0 0 0 0 0 0 0 0
39311- 0 0 0 0 0 0 0 0 0 0 0 0
39312- 0 0 0 0 0 0 0 0 0 14 14 14
39313- 34 34 34 66 66 66 78 78 78 6 6 6
39314- 2 2 6 18 18 18 218 218 218 253 253 253
39315-253 253 253 253 253 253 253 253 253 246 246 246
39316-226 226 226 231 231 231 246 246 246 253 253 253
39317-253 253 253 253 253 253 253 253 253 253 253 253
39318-253 253 253 253 253 253 253 253 253 253 253 253
39319-253 253 253 178 178 178 2 2 6 2 2 6
39320- 2 2 6 2 2 6 2 2 6 2 2 6
39321- 2 2 6 18 18 18 90 90 90 62 62 62
39322- 30 30 30 10 10 10 0 0 0 0 0 0
39323- 0 0 0 0 0 0 0 0 0 0 0 0
39324- 0 0 0 0 0 0 0 0 0 0 0 0
39325- 0 0 0 0 0 0 0 0 0 0 0 0
39326- 0 0 0 0 0 0 0 0 0 0 0 0
39327- 0 0 0 0 0 0 0 0 0 0 0 0
39328- 0 0 0 0 0 0 0 0 0 0 0 0
39329- 0 0 0 0 0 0 0 0 0 0 0 0
39330- 0 0 0 0 0 0 0 0 0 0 0 0
39331- 0 0 0 0 0 0 0 0 0 0 0 0
39332- 0 0 0 0 0 0 10 10 10 26 26 26
39333- 58 58 58 90 90 90 18 18 18 2 2 6
39334- 2 2 6 110 110 110 253 253 253 253 253 253
39335-253 253 253 253 253 253 253 253 253 253 253 253
39336-250 250 250 253 253 253 253 253 253 253 253 253
39337-253 253 253 253 253 253 253 253 253 253 253 253
39338-253 253 253 253 253 253 253 253 253 253 253 253
39339-253 253 253 231 231 231 18 18 18 2 2 6
39340- 2 2 6 2 2 6 2 2 6 2 2 6
39341- 2 2 6 2 2 6 18 18 18 94 94 94
39342- 54 54 54 26 26 26 10 10 10 0 0 0
39343- 0 0 0 0 0 0 0 0 0 0 0 0
39344- 0 0 0 0 0 0 0 0 0 0 0 0
39345- 0 0 0 0 0 0 0 0 0 0 0 0
39346- 0 0 0 0 0 0 0 0 0 0 0 0
39347- 0 0 0 0 0 0 0 0 0 0 0 0
39348- 0 0 0 0 0 0 0 0 0 0 0 0
39349- 0 0 0 0 0 0 0 0 0 0 0 0
39350- 0 0 0 0 0 0 0 0 0 0 0 0
39351- 0 0 0 0 0 0 0 0 0 0 0 0
39352- 0 0 0 6 6 6 22 22 22 50 50 50
39353- 90 90 90 26 26 26 2 2 6 2 2 6
39354- 14 14 14 195 195 195 250 250 250 253 253 253
39355-253 253 253 253 253 253 253 253 253 253 253 253
39356-253 253 253 253 253 253 253 253 253 253 253 253
39357-253 253 253 253 253 253 253 253 253 253 253 253
39358-253 253 253 253 253 253 253 253 253 253 253 253
39359-250 250 250 242 242 242 54 54 54 2 2 6
39360- 2 2 6 2 2 6 2 2 6 2 2 6
39361- 2 2 6 2 2 6 2 2 6 38 38 38
39362- 86 86 86 50 50 50 22 22 22 6 6 6
39363- 0 0 0 0 0 0 0 0 0 0 0 0
39364- 0 0 0 0 0 0 0 0 0 0 0 0
39365- 0 0 0 0 0 0 0 0 0 0 0 0
39366- 0 0 0 0 0 0 0 0 0 0 0 0
39367- 0 0 0 0 0 0 0 0 0 0 0 0
39368- 0 0 0 0 0 0 0 0 0 0 0 0
39369- 0 0 0 0 0 0 0 0 0 0 0 0
39370- 0 0 0 0 0 0 0 0 0 0 0 0
39371- 0 0 0 0 0 0 0 0 0 0 0 0
39372- 6 6 6 14 14 14 38 38 38 82 82 82
39373- 34 34 34 2 2 6 2 2 6 2 2 6
39374- 42 42 42 195 195 195 246 246 246 253 253 253
39375-253 253 253 253 253 253 253 253 253 250 250 250
39376-242 242 242 242 242 242 250 250 250 253 253 253
39377-253 253 253 253 253 253 253 253 253 253 253 253
39378-253 253 253 250 250 250 246 246 246 238 238 238
39379-226 226 226 231 231 231 101 101 101 6 6 6
39380- 2 2 6 2 2 6 2 2 6 2 2 6
39381- 2 2 6 2 2 6 2 2 6 2 2 6
39382- 38 38 38 82 82 82 42 42 42 14 14 14
39383- 6 6 6 0 0 0 0 0 0 0 0 0
39384- 0 0 0 0 0 0 0 0 0 0 0 0
39385- 0 0 0 0 0 0 0 0 0 0 0 0
39386- 0 0 0 0 0 0 0 0 0 0 0 0
39387- 0 0 0 0 0 0 0 0 0 0 0 0
39388- 0 0 0 0 0 0 0 0 0 0 0 0
39389- 0 0 0 0 0 0 0 0 0 0 0 0
39390- 0 0 0 0 0 0 0 0 0 0 0 0
39391- 0 0 0 0 0 0 0 0 0 0 0 0
39392- 10 10 10 26 26 26 62 62 62 66 66 66
39393- 2 2 6 2 2 6 2 2 6 6 6 6
39394- 70 70 70 170 170 170 206 206 206 234 234 234
39395-246 246 246 250 250 250 250 250 250 238 238 238
39396-226 226 226 231 231 231 238 238 238 250 250 250
39397-250 250 250 250 250 250 246 246 246 231 231 231
39398-214 214 214 206 206 206 202 202 202 202 202 202
39399-198 198 198 202 202 202 182 182 182 18 18 18
39400- 2 2 6 2 2 6 2 2 6 2 2 6
39401- 2 2 6 2 2 6 2 2 6 2 2 6
39402- 2 2 6 62 62 62 66 66 66 30 30 30
39403- 10 10 10 0 0 0 0 0 0 0 0 0
39404- 0 0 0 0 0 0 0 0 0 0 0 0
39405- 0 0 0 0 0 0 0 0 0 0 0 0
39406- 0 0 0 0 0 0 0 0 0 0 0 0
39407- 0 0 0 0 0 0 0 0 0 0 0 0
39408- 0 0 0 0 0 0 0 0 0 0 0 0
39409- 0 0 0 0 0 0 0 0 0 0 0 0
39410- 0 0 0 0 0 0 0 0 0 0 0 0
39411- 0 0 0 0 0 0 0 0 0 0 0 0
39412- 14 14 14 42 42 42 82 82 82 18 18 18
39413- 2 2 6 2 2 6 2 2 6 10 10 10
39414- 94 94 94 182 182 182 218 218 218 242 242 242
39415-250 250 250 253 253 253 253 253 253 250 250 250
39416-234 234 234 253 253 253 253 253 253 253 253 253
39417-253 253 253 253 253 253 253 253 253 246 246 246
39418-238 238 238 226 226 226 210 210 210 202 202 202
39419-195 195 195 195 195 195 210 210 210 158 158 158
39420- 6 6 6 14 14 14 50 50 50 14 14 14
39421- 2 2 6 2 2 6 2 2 6 2 2 6
39422- 2 2 6 6 6 6 86 86 86 46 46 46
39423- 18 18 18 6 6 6 0 0 0 0 0 0
39424- 0 0 0 0 0 0 0 0 0 0 0 0
39425- 0 0 0 0 0 0 0 0 0 0 0 0
39426- 0 0 0 0 0 0 0 0 0 0 0 0
39427- 0 0 0 0 0 0 0 0 0 0 0 0
39428- 0 0 0 0 0 0 0 0 0 0 0 0
39429- 0 0 0 0 0 0 0 0 0 0 0 0
39430- 0 0 0 0 0 0 0 0 0 0 0 0
39431- 0 0 0 0 0 0 0 0 0 6 6 6
39432- 22 22 22 54 54 54 70 70 70 2 2 6
39433- 2 2 6 10 10 10 2 2 6 22 22 22
39434-166 166 166 231 231 231 250 250 250 253 253 253
39435-253 253 253 253 253 253 253 253 253 250 250 250
39436-242 242 242 253 253 253 253 253 253 253 253 253
39437-253 253 253 253 253 253 253 253 253 253 253 253
39438-253 253 253 253 253 253 253 253 253 246 246 246
39439-231 231 231 206 206 206 198 198 198 226 226 226
39440- 94 94 94 2 2 6 6 6 6 38 38 38
39441- 30 30 30 2 2 6 2 2 6 2 2 6
39442- 2 2 6 2 2 6 62 62 62 66 66 66
39443- 26 26 26 10 10 10 0 0 0 0 0 0
39444- 0 0 0 0 0 0 0 0 0 0 0 0
39445- 0 0 0 0 0 0 0 0 0 0 0 0
39446- 0 0 0 0 0 0 0 0 0 0 0 0
39447- 0 0 0 0 0 0 0 0 0 0 0 0
39448- 0 0 0 0 0 0 0 0 0 0 0 0
39449- 0 0 0 0 0 0 0 0 0 0 0 0
39450- 0 0 0 0 0 0 0 0 0 0 0 0
39451- 0 0 0 0 0 0 0 0 0 10 10 10
39452- 30 30 30 74 74 74 50 50 50 2 2 6
39453- 26 26 26 26 26 26 2 2 6 106 106 106
39454-238 238 238 253 253 253 253 253 253 253 253 253
39455-253 253 253 253 253 253 253 253 253 253 253 253
39456-253 253 253 253 253 253 253 253 253 253 253 253
39457-253 253 253 253 253 253 253 253 253 253 253 253
39458-253 253 253 253 253 253 253 253 253 253 253 253
39459-253 253 253 246 246 246 218 218 218 202 202 202
39460-210 210 210 14 14 14 2 2 6 2 2 6
39461- 30 30 30 22 22 22 2 2 6 2 2 6
39462- 2 2 6 2 2 6 18 18 18 86 86 86
39463- 42 42 42 14 14 14 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 0 0 0 0 0 0 0 0 0 0 0 0
39466- 0 0 0 0 0 0 0 0 0 0 0 0
39467- 0 0 0 0 0 0 0 0 0 0 0 0
39468- 0 0 0 0 0 0 0 0 0 0 0 0
39469- 0 0 0 0 0 0 0 0 0 0 0 0
39470- 0 0 0 0 0 0 0 0 0 0 0 0
39471- 0 0 0 0 0 0 0 0 0 14 14 14
39472- 42 42 42 90 90 90 22 22 22 2 2 6
39473- 42 42 42 2 2 6 18 18 18 218 218 218
39474-253 253 253 253 253 253 253 253 253 253 253 253
39475-253 253 253 253 253 253 253 253 253 253 253 253
39476-253 253 253 253 253 253 253 253 253 253 253 253
39477-253 253 253 253 253 253 253 253 253 253 253 253
39478-253 253 253 253 253 253 253 253 253 253 253 253
39479-253 253 253 253 253 253 250 250 250 221 221 221
39480-218 218 218 101 101 101 2 2 6 14 14 14
39481- 18 18 18 38 38 38 10 10 10 2 2 6
39482- 2 2 6 2 2 6 2 2 6 78 78 78
39483- 58 58 58 22 22 22 6 6 6 0 0 0
39484- 0 0 0 0 0 0 0 0 0 0 0 0
39485- 0 0 0 0 0 0 0 0 0 0 0 0
39486- 0 0 0 0 0 0 0 0 0 0 0 0
39487- 0 0 0 0 0 0 0 0 0 0 0 0
39488- 0 0 0 0 0 0 0 0 0 0 0 0
39489- 0 0 0 0 0 0 0 0 0 0 0 0
39490- 0 0 0 0 0 0 0 0 0 0 0 0
39491- 0 0 0 0 0 0 6 6 6 18 18 18
39492- 54 54 54 82 82 82 2 2 6 26 26 26
39493- 22 22 22 2 2 6 123 123 123 253 253 253
39494-253 253 253 253 253 253 253 253 253 253 253 253
39495-253 253 253 253 253 253 253 253 253 253 253 253
39496-253 253 253 253 253 253 253 253 253 253 253 253
39497-253 253 253 253 253 253 253 253 253 253 253 253
39498-253 253 253 253 253 253 253 253 253 253 253 253
39499-253 253 253 253 253 253 253 253 253 250 250 250
39500-238 238 238 198 198 198 6 6 6 38 38 38
39501- 58 58 58 26 26 26 38 38 38 2 2 6
39502- 2 2 6 2 2 6 2 2 6 46 46 46
39503- 78 78 78 30 30 30 10 10 10 0 0 0
39504- 0 0 0 0 0 0 0 0 0 0 0 0
39505- 0 0 0 0 0 0 0 0 0 0 0 0
39506- 0 0 0 0 0 0 0 0 0 0 0 0
39507- 0 0 0 0 0 0 0 0 0 0 0 0
39508- 0 0 0 0 0 0 0 0 0 0 0 0
39509- 0 0 0 0 0 0 0 0 0 0 0 0
39510- 0 0 0 0 0 0 0 0 0 0 0 0
39511- 0 0 0 0 0 0 10 10 10 30 30 30
39512- 74 74 74 58 58 58 2 2 6 42 42 42
39513- 2 2 6 22 22 22 231 231 231 253 253 253
39514-253 253 253 253 253 253 253 253 253 253 253 253
39515-253 253 253 253 253 253 253 253 253 250 250 250
39516-253 253 253 253 253 253 253 253 253 253 253 253
39517-253 253 253 253 253 253 253 253 253 253 253 253
39518-253 253 253 253 253 253 253 253 253 253 253 253
39519-253 253 253 253 253 253 253 253 253 253 253 253
39520-253 253 253 246 246 246 46 46 46 38 38 38
39521- 42 42 42 14 14 14 38 38 38 14 14 14
39522- 2 2 6 2 2 6 2 2 6 6 6 6
39523- 86 86 86 46 46 46 14 14 14 0 0 0
39524- 0 0 0 0 0 0 0 0 0 0 0 0
39525- 0 0 0 0 0 0 0 0 0 0 0 0
39526- 0 0 0 0 0 0 0 0 0 0 0 0
39527- 0 0 0 0 0 0 0 0 0 0 0 0
39528- 0 0 0 0 0 0 0 0 0 0 0 0
39529- 0 0 0 0 0 0 0 0 0 0 0 0
39530- 0 0 0 0 0 0 0 0 0 0 0 0
39531- 0 0 0 6 6 6 14 14 14 42 42 42
39532- 90 90 90 18 18 18 18 18 18 26 26 26
39533- 2 2 6 116 116 116 253 253 253 253 253 253
39534-253 253 253 253 253 253 253 253 253 253 253 253
39535-253 253 253 253 253 253 250 250 250 238 238 238
39536-253 253 253 253 253 253 253 253 253 253 253 253
39537-253 253 253 253 253 253 253 253 253 253 253 253
39538-253 253 253 253 253 253 253 253 253 253 253 253
39539-253 253 253 253 253 253 253 253 253 253 253 253
39540-253 253 253 253 253 253 94 94 94 6 6 6
39541- 2 2 6 2 2 6 10 10 10 34 34 34
39542- 2 2 6 2 2 6 2 2 6 2 2 6
39543- 74 74 74 58 58 58 22 22 22 6 6 6
39544- 0 0 0 0 0 0 0 0 0 0 0 0
39545- 0 0 0 0 0 0 0 0 0 0 0 0
39546- 0 0 0 0 0 0 0 0 0 0 0 0
39547- 0 0 0 0 0 0 0 0 0 0 0 0
39548- 0 0 0 0 0 0 0 0 0 0 0 0
39549- 0 0 0 0 0 0 0 0 0 0 0 0
39550- 0 0 0 0 0 0 0 0 0 0 0 0
39551- 0 0 0 10 10 10 26 26 26 66 66 66
39552- 82 82 82 2 2 6 38 38 38 6 6 6
39553- 14 14 14 210 210 210 253 253 253 253 253 253
39554-253 253 253 253 253 253 253 253 253 253 253 253
39555-253 253 253 253 253 253 246 246 246 242 242 242
39556-253 253 253 253 253 253 253 253 253 253 253 253
39557-253 253 253 253 253 253 253 253 253 253 253 253
39558-253 253 253 253 253 253 253 253 253 253 253 253
39559-253 253 253 253 253 253 253 253 253 253 253 253
39560-253 253 253 253 253 253 144 144 144 2 2 6
39561- 2 2 6 2 2 6 2 2 6 46 46 46
39562- 2 2 6 2 2 6 2 2 6 2 2 6
39563- 42 42 42 74 74 74 30 30 30 10 10 10
39564- 0 0 0 0 0 0 0 0 0 0 0 0
39565- 0 0 0 0 0 0 0 0 0 0 0 0
39566- 0 0 0 0 0 0 0 0 0 0 0 0
39567- 0 0 0 0 0 0 0 0 0 0 0 0
39568- 0 0 0 0 0 0 0 0 0 0 0 0
39569- 0 0 0 0 0 0 0 0 0 0 0 0
39570- 0 0 0 0 0 0 0 0 0 0 0 0
39571- 6 6 6 14 14 14 42 42 42 90 90 90
39572- 26 26 26 6 6 6 42 42 42 2 2 6
39573- 74 74 74 250 250 250 253 253 253 253 253 253
39574-253 253 253 253 253 253 253 253 253 253 253 253
39575-253 253 253 253 253 253 242 242 242 242 242 242
39576-253 253 253 253 253 253 253 253 253 253 253 253
39577-253 253 253 253 253 253 253 253 253 253 253 253
39578-253 253 253 253 253 253 253 253 253 253 253 253
39579-253 253 253 253 253 253 253 253 253 253 253 253
39580-253 253 253 253 253 253 182 182 182 2 2 6
39581- 2 2 6 2 2 6 2 2 6 46 46 46
39582- 2 2 6 2 2 6 2 2 6 2 2 6
39583- 10 10 10 86 86 86 38 38 38 10 10 10
39584- 0 0 0 0 0 0 0 0 0 0 0 0
39585- 0 0 0 0 0 0 0 0 0 0 0 0
39586- 0 0 0 0 0 0 0 0 0 0 0 0
39587- 0 0 0 0 0 0 0 0 0 0 0 0
39588- 0 0 0 0 0 0 0 0 0 0 0 0
39589- 0 0 0 0 0 0 0 0 0 0 0 0
39590- 0 0 0 0 0 0 0 0 0 0 0 0
39591- 10 10 10 26 26 26 66 66 66 82 82 82
39592- 2 2 6 22 22 22 18 18 18 2 2 6
39593-149 149 149 253 253 253 253 253 253 253 253 253
39594-253 253 253 253 253 253 253 253 253 253 253 253
39595-253 253 253 253 253 253 234 234 234 242 242 242
39596-253 253 253 253 253 253 253 253 253 253 253 253
39597-253 253 253 253 253 253 253 253 253 253 253 253
39598-253 253 253 253 253 253 253 253 253 253 253 253
39599-253 253 253 253 253 253 253 253 253 253 253 253
39600-253 253 253 253 253 253 206 206 206 2 2 6
39601- 2 2 6 2 2 6 2 2 6 38 38 38
39602- 2 2 6 2 2 6 2 2 6 2 2 6
39603- 6 6 6 86 86 86 46 46 46 14 14 14
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 0 0 0 0 0 0 0 0 0 0 0 0
39607- 0 0 0 0 0 0 0 0 0 0 0 0
39608- 0 0 0 0 0 0 0 0 0 0 0 0
39609- 0 0 0 0 0 0 0 0 0 0 0 0
39610- 0 0 0 0 0 0 0 0 0 6 6 6
39611- 18 18 18 46 46 46 86 86 86 18 18 18
39612- 2 2 6 34 34 34 10 10 10 6 6 6
39613-210 210 210 253 253 253 253 253 253 253 253 253
39614-253 253 253 253 253 253 253 253 253 253 253 253
39615-253 253 253 253 253 253 234 234 234 242 242 242
39616-253 253 253 253 253 253 253 253 253 253 253 253
39617-253 253 253 253 253 253 253 253 253 253 253 253
39618-253 253 253 253 253 253 253 253 253 253 253 253
39619-253 253 253 253 253 253 253 253 253 253 253 253
39620-253 253 253 253 253 253 221 221 221 6 6 6
39621- 2 2 6 2 2 6 6 6 6 30 30 30
39622- 2 2 6 2 2 6 2 2 6 2 2 6
39623- 2 2 6 82 82 82 54 54 54 18 18 18
39624- 6 6 6 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 0 0 0
39626- 0 0 0 0 0 0 0 0 0 0 0 0
39627- 0 0 0 0 0 0 0 0 0 0 0 0
39628- 0 0 0 0 0 0 0 0 0 0 0 0
39629- 0 0 0 0 0 0 0 0 0 0 0 0
39630- 0 0 0 0 0 0 0 0 0 10 10 10
39631- 26 26 26 66 66 66 62 62 62 2 2 6
39632- 2 2 6 38 38 38 10 10 10 26 26 26
39633-238 238 238 253 253 253 253 253 253 253 253 253
39634-253 253 253 253 253 253 253 253 253 253 253 253
39635-253 253 253 253 253 253 231 231 231 238 238 238
39636-253 253 253 253 253 253 253 253 253 253 253 253
39637-253 253 253 253 253 253 253 253 253 253 253 253
39638-253 253 253 253 253 253 253 253 253 253 253 253
39639-253 253 253 253 253 253 253 253 253 253 253 253
39640-253 253 253 253 253 253 231 231 231 6 6 6
39641- 2 2 6 2 2 6 10 10 10 30 30 30
39642- 2 2 6 2 2 6 2 2 6 2 2 6
39643- 2 2 6 66 66 66 58 58 58 22 22 22
39644- 6 6 6 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 0 0 0 0 0 0
39647- 0 0 0 0 0 0 0 0 0 0 0 0
39648- 0 0 0 0 0 0 0 0 0 0 0 0
39649- 0 0 0 0 0 0 0 0 0 0 0 0
39650- 0 0 0 0 0 0 0 0 0 10 10 10
39651- 38 38 38 78 78 78 6 6 6 2 2 6
39652- 2 2 6 46 46 46 14 14 14 42 42 42
39653-246 246 246 253 253 253 253 253 253 253 253 253
39654-253 253 253 253 253 253 253 253 253 253 253 253
39655-253 253 253 253 253 253 231 231 231 242 242 242
39656-253 253 253 253 253 253 253 253 253 253 253 253
39657-253 253 253 253 253 253 253 253 253 253 253 253
39658-253 253 253 253 253 253 253 253 253 253 253 253
39659-253 253 253 253 253 253 253 253 253 253 253 253
39660-253 253 253 253 253 253 234 234 234 10 10 10
39661- 2 2 6 2 2 6 22 22 22 14 14 14
39662- 2 2 6 2 2 6 2 2 6 2 2 6
39663- 2 2 6 66 66 66 62 62 62 22 22 22
39664- 6 6 6 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 0 0 0 0 0 0 0 0 0
39667- 0 0 0 0 0 0 0 0 0 0 0 0
39668- 0 0 0 0 0 0 0 0 0 0 0 0
39669- 0 0 0 0 0 0 0 0 0 0 0 0
39670- 0 0 0 0 0 0 6 6 6 18 18 18
39671- 50 50 50 74 74 74 2 2 6 2 2 6
39672- 14 14 14 70 70 70 34 34 34 62 62 62
39673-250 250 250 253 253 253 253 253 253 253 253 253
39674-253 253 253 253 253 253 253 253 253 253 253 253
39675-253 253 253 253 253 253 231 231 231 246 246 246
39676-253 253 253 253 253 253 253 253 253 253 253 253
39677-253 253 253 253 253 253 253 253 253 253 253 253
39678-253 253 253 253 253 253 253 253 253 253 253 253
39679-253 253 253 253 253 253 253 253 253 253 253 253
39680-253 253 253 253 253 253 234 234 234 14 14 14
39681- 2 2 6 2 2 6 30 30 30 2 2 6
39682- 2 2 6 2 2 6 2 2 6 2 2 6
39683- 2 2 6 66 66 66 62 62 62 22 22 22
39684- 6 6 6 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 0 0 0 0 0 0 0 0 0 0 0 0
39687- 0 0 0 0 0 0 0 0 0 0 0 0
39688- 0 0 0 0 0 0 0 0 0 0 0 0
39689- 0 0 0 0 0 0 0 0 0 0 0 0
39690- 0 0 0 0 0 0 6 6 6 18 18 18
39691- 54 54 54 62 62 62 2 2 6 2 2 6
39692- 2 2 6 30 30 30 46 46 46 70 70 70
39693-250 250 250 253 253 253 253 253 253 253 253 253
39694-253 253 253 253 253 253 253 253 253 253 253 253
39695-253 253 253 253 253 253 231 231 231 246 246 246
39696-253 253 253 253 253 253 253 253 253 253 253 253
39697-253 253 253 253 253 253 253 253 253 253 253 253
39698-253 253 253 253 253 253 253 253 253 253 253 253
39699-253 253 253 253 253 253 253 253 253 253 253 253
39700-253 253 253 253 253 253 226 226 226 10 10 10
39701- 2 2 6 6 6 6 30 30 30 2 2 6
39702- 2 2 6 2 2 6 2 2 6 2 2 6
39703- 2 2 6 66 66 66 58 58 58 22 22 22
39704- 6 6 6 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 0 0 0 0 0 0 0 0 0 0 0 0
39707- 0 0 0 0 0 0 0 0 0 0 0 0
39708- 0 0 0 0 0 0 0 0 0 0 0 0
39709- 0 0 0 0 0 0 0 0 0 0 0 0
39710- 0 0 0 0 0 0 6 6 6 22 22 22
39711- 58 58 58 62 62 62 2 2 6 2 2 6
39712- 2 2 6 2 2 6 30 30 30 78 78 78
39713-250 250 250 253 253 253 253 253 253 253 253 253
39714-253 253 253 253 253 253 253 253 253 253 253 253
39715-253 253 253 253 253 253 231 231 231 246 246 246
39716-253 253 253 253 253 253 253 253 253 253 253 253
39717-253 253 253 253 253 253 253 253 253 253 253 253
39718-253 253 253 253 253 253 253 253 253 253 253 253
39719-253 253 253 253 253 253 253 253 253 253 253 253
39720-253 253 253 253 253 253 206 206 206 2 2 6
39721- 22 22 22 34 34 34 18 14 6 22 22 22
39722- 26 26 26 18 18 18 6 6 6 2 2 6
39723- 2 2 6 82 82 82 54 54 54 18 18 18
39724- 6 6 6 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 0 0 0 0 0 0 0 0 0 0 0 0
39727- 0 0 0 0 0 0 0 0 0 0 0 0
39728- 0 0 0 0 0 0 0 0 0 0 0 0
39729- 0 0 0 0 0 0 0 0 0 0 0 0
39730- 0 0 0 0 0 0 6 6 6 26 26 26
39731- 62 62 62 106 106 106 74 54 14 185 133 11
39732-210 162 10 121 92 8 6 6 6 62 62 62
39733-238 238 238 253 253 253 253 253 253 253 253 253
39734-253 253 253 253 253 253 253 253 253 253 253 253
39735-253 253 253 253 253 253 231 231 231 246 246 246
39736-253 253 253 253 253 253 253 253 253 253 253 253
39737-253 253 253 253 253 253 253 253 253 253 253 253
39738-253 253 253 253 253 253 253 253 253 253 253 253
39739-253 253 253 253 253 253 253 253 253 253 253 253
39740-253 253 253 253 253 253 158 158 158 18 18 18
39741- 14 14 14 2 2 6 2 2 6 2 2 6
39742- 6 6 6 18 18 18 66 66 66 38 38 38
39743- 6 6 6 94 94 94 50 50 50 18 18 18
39744- 6 6 6 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 0 0 0
39746- 0 0 0 0 0 0 0 0 0 0 0 0
39747- 0 0 0 0 0 0 0 0 0 0 0 0
39748- 0 0 0 0 0 0 0 0 0 0 0 0
39749- 0 0 0 0 0 0 0 0 0 6 6 6
39750- 10 10 10 10 10 10 18 18 18 38 38 38
39751- 78 78 78 142 134 106 216 158 10 242 186 14
39752-246 190 14 246 190 14 156 118 10 10 10 10
39753- 90 90 90 238 238 238 253 253 253 253 253 253
39754-253 253 253 253 253 253 253 253 253 253 253 253
39755-253 253 253 253 253 253 231 231 231 250 250 250
39756-253 253 253 253 253 253 253 253 253 253 253 253
39757-253 253 253 253 253 253 253 253 253 253 253 253
39758-253 253 253 253 253 253 253 253 253 253 253 253
39759-253 253 253 253 253 253 253 253 253 246 230 190
39760-238 204 91 238 204 91 181 142 44 37 26 9
39761- 2 2 6 2 2 6 2 2 6 2 2 6
39762- 2 2 6 2 2 6 38 38 38 46 46 46
39763- 26 26 26 106 106 106 54 54 54 18 18 18
39764- 6 6 6 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 0 0 0
39766- 0 0 0 0 0 0 0 0 0 0 0 0
39767- 0 0 0 0 0 0 0 0 0 0 0 0
39768- 0 0 0 0 0 0 0 0 0 0 0 0
39769- 0 0 0 6 6 6 14 14 14 22 22 22
39770- 30 30 30 38 38 38 50 50 50 70 70 70
39771-106 106 106 190 142 34 226 170 11 242 186 14
39772-246 190 14 246 190 14 246 190 14 154 114 10
39773- 6 6 6 74 74 74 226 226 226 253 253 253
39774-253 253 253 253 253 253 253 253 253 253 253 253
39775-253 253 253 253 253 253 231 231 231 250 250 250
39776-253 253 253 253 253 253 253 253 253 253 253 253
39777-253 253 253 253 253 253 253 253 253 253 253 253
39778-253 253 253 253 253 253 253 253 253 253 253 253
39779-253 253 253 253 253 253 253 253 253 228 184 62
39780-241 196 14 241 208 19 232 195 16 38 30 10
39781- 2 2 6 2 2 6 2 2 6 2 2 6
39782- 2 2 6 6 6 6 30 30 30 26 26 26
39783-203 166 17 154 142 90 66 66 66 26 26 26
39784- 6 6 6 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 0 0 0
39786- 0 0 0 0 0 0 0 0 0 0 0 0
39787- 0 0 0 0 0 0 0 0 0 0 0 0
39788- 0 0 0 0 0 0 0 0 0 0 0 0
39789- 6 6 6 18 18 18 38 38 38 58 58 58
39790- 78 78 78 86 86 86 101 101 101 123 123 123
39791-175 146 61 210 150 10 234 174 13 246 186 14
39792-246 190 14 246 190 14 246 190 14 238 190 10
39793-102 78 10 2 2 6 46 46 46 198 198 198
39794-253 253 253 253 253 253 253 253 253 253 253 253
39795-253 253 253 253 253 253 234 234 234 242 242 242
39796-253 253 253 253 253 253 253 253 253 253 253 253
39797-253 253 253 253 253 253 253 253 253 253 253 253
39798-253 253 253 253 253 253 253 253 253 253 253 253
39799-253 253 253 253 253 253 253 253 253 224 178 62
39800-242 186 14 241 196 14 210 166 10 22 18 6
39801- 2 2 6 2 2 6 2 2 6 2 2 6
39802- 2 2 6 2 2 6 6 6 6 121 92 8
39803-238 202 15 232 195 16 82 82 82 34 34 34
39804- 10 10 10 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 0 0 0
39806- 0 0 0 0 0 0 0 0 0 0 0 0
39807- 0 0 0 0 0 0 0 0 0 0 0 0
39808- 0 0 0 0 0 0 0 0 0 0 0 0
39809- 14 14 14 38 38 38 70 70 70 154 122 46
39810-190 142 34 200 144 11 197 138 11 197 138 11
39811-213 154 11 226 170 11 242 186 14 246 190 14
39812-246 190 14 246 190 14 246 190 14 246 190 14
39813-225 175 15 46 32 6 2 2 6 22 22 22
39814-158 158 158 250 250 250 253 253 253 253 253 253
39815-253 253 253 253 253 253 253 253 253 253 253 253
39816-253 253 253 253 253 253 253 253 253 253 253 253
39817-253 253 253 253 253 253 253 253 253 253 253 253
39818-253 253 253 253 253 253 253 253 253 253 253 253
39819-253 253 253 250 250 250 242 242 242 224 178 62
39820-239 182 13 236 186 11 213 154 11 46 32 6
39821- 2 2 6 2 2 6 2 2 6 2 2 6
39822- 2 2 6 2 2 6 61 42 6 225 175 15
39823-238 190 10 236 186 11 112 100 78 42 42 42
39824- 14 14 14 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 0 0 0 0 0 0 0 0 0
39827- 0 0 0 0 0 0 0 0 0 0 0 0
39828- 0 0 0 0 0 0 0 0 0 6 6 6
39829- 22 22 22 54 54 54 154 122 46 213 154 11
39830-226 170 11 230 174 11 226 170 11 226 170 11
39831-236 178 12 242 186 14 246 190 14 246 190 14
39832-246 190 14 246 190 14 246 190 14 246 190 14
39833-241 196 14 184 144 12 10 10 10 2 2 6
39834- 6 6 6 116 116 116 242 242 242 253 253 253
39835-253 253 253 253 253 253 253 253 253 253 253 253
39836-253 253 253 253 253 253 253 253 253 253 253 253
39837-253 253 253 253 253 253 253 253 253 253 253 253
39838-253 253 253 253 253 253 253 253 253 253 253 253
39839-253 253 253 231 231 231 198 198 198 214 170 54
39840-236 178 12 236 178 12 210 150 10 137 92 6
39841- 18 14 6 2 2 6 2 2 6 2 2 6
39842- 6 6 6 70 47 6 200 144 11 236 178 12
39843-239 182 13 239 182 13 124 112 88 58 58 58
39844- 22 22 22 6 6 6 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 0 0 0
39847- 0 0 0 0 0 0 0 0 0 0 0 0
39848- 0 0 0 0 0 0 0 0 0 10 10 10
39849- 30 30 30 70 70 70 180 133 36 226 170 11
39850-239 182 13 242 186 14 242 186 14 246 186 14
39851-246 190 14 246 190 14 246 190 14 246 190 14
39852-246 190 14 246 190 14 246 190 14 246 190 14
39853-246 190 14 232 195 16 98 70 6 2 2 6
39854- 2 2 6 2 2 6 66 66 66 221 221 221
39855-253 253 253 253 253 253 253 253 253 253 253 253
39856-253 253 253 253 253 253 253 253 253 253 253 253
39857-253 253 253 253 253 253 253 253 253 253 253 253
39858-253 253 253 253 253 253 253 253 253 253 253 253
39859-253 253 253 206 206 206 198 198 198 214 166 58
39860-230 174 11 230 174 11 216 158 10 192 133 9
39861-163 110 8 116 81 8 102 78 10 116 81 8
39862-167 114 7 197 138 11 226 170 11 239 182 13
39863-242 186 14 242 186 14 162 146 94 78 78 78
39864- 34 34 34 14 14 14 6 6 6 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 0 0 0
39867- 0 0 0 0 0 0 0 0 0 0 0 0
39868- 0 0 0 0 0 0 0 0 0 6 6 6
39869- 30 30 30 78 78 78 190 142 34 226 170 11
39870-239 182 13 246 190 14 246 190 14 246 190 14
39871-246 190 14 246 190 14 246 190 14 246 190 14
39872-246 190 14 246 190 14 246 190 14 246 190 14
39873-246 190 14 241 196 14 203 166 17 22 18 6
39874- 2 2 6 2 2 6 2 2 6 38 38 38
39875-218 218 218 253 253 253 253 253 253 253 253 253
39876-253 253 253 253 253 253 253 253 253 253 253 253
39877-253 253 253 253 253 253 253 253 253 253 253 253
39878-253 253 253 253 253 253 253 253 253 253 253 253
39879-250 250 250 206 206 206 198 198 198 202 162 69
39880-226 170 11 236 178 12 224 166 10 210 150 10
39881-200 144 11 197 138 11 192 133 9 197 138 11
39882-210 150 10 226 170 11 242 186 14 246 190 14
39883-246 190 14 246 186 14 225 175 15 124 112 88
39884- 62 62 62 30 30 30 14 14 14 6 6 6
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 0 0 0
39887- 0 0 0 0 0 0 0 0 0 0 0 0
39888- 0 0 0 0 0 0 0 0 0 10 10 10
39889- 30 30 30 78 78 78 174 135 50 224 166 10
39890-239 182 13 246 190 14 246 190 14 246 190 14
39891-246 190 14 246 190 14 246 190 14 246 190 14
39892-246 190 14 246 190 14 246 190 14 246 190 14
39893-246 190 14 246 190 14 241 196 14 139 102 15
39894- 2 2 6 2 2 6 2 2 6 2 2 6
39895- 78 78 78 250 250 250 253 253 253 253 253 253
39896-253 253 253 253 253 253 253 253 253 253 253 253
39897-253 253 253 253 253 253 253 253 253 253 253 253
39898-253 253 253 253 253 253 253 253 253 253 253 253
39899-250 250 250 214 214 214 198 198 198 190 150 46
39900-219 162 10 236 178 12 234 174 13 224 166 10
39901-216 158 10 213 154 11 213 154 11 216 158 10
39902-226 170 11 239 182 13 246 190 14 246 190 14
39903-246 190 14 246 190 14 242 186 14 206 162 42
39904-101 101 101 58 58 58 30 30 30 14 14 14
39905- 6 6 6 0 0 0 0 0 0 0 0 0
39906- 0 0 0 0 0 0 0 0 0 0 0 0
39907- 0 0 0 0 0 0 0 0 0 0 0 0
39908- 0 0 0 0 0 0 0 0 0 10 10 10
39909- 30 30 30 74 74 74 174 135 50 216 158 10
39910-236 178 12 246 190 14 246 190 14 246 190 14
39911-246 190 14 246 190 14 246 190 14 246 190 14
39912-246 190 14 246 190 14 246 190 14 246 190 14
39913-246 190 14 246 190 14 241 196 14 226 184 13
39914- 61 42 6 2 2 6 2 2 6 2 2 6
39915- 22 22 22 238 238 238 253 253 253 253 253 253
39916-253 253 253 253 253 253 253 253 253 253 253 253
39917-253 253 253 253 253 253 253 253 253 253 253 253
39918-253 253 253 253 253 253 253 253 253 253 253 253
39919-253 253 253 226 226 226 187 187 187 180 133 36
39920-216 158 10 236 178 12 239 182 13 236 178 12
39921-230 174 11 226 170 11 226 170 11 230 174 11
39922-236 178 12 242 186 14 246 190 14 246 190 14
39923-246 190 14 246 190 14 246 186 14 239 182 13
39924-206 162 42 106 106 106 66 66 66 34 34 34
39925- 14 14 14 6 6 6 0 0 0 0 0 0
39926- 0 0 0 0 0 0 0 0 0 0 0 0
39927- 0 0 0 0 0 0 0 0 0 0 0 0
39928- 0 0 0 0 0 0 0 0 0 6 6 6
39929- 26 26 26 70 70 70 163 133 67 213 154 11
39930-236 178 12 246 190 14 246 190 14 246 190 14
39931-246 190 14 246 190 14 246 190 14 246 190 14
39932-246 190 14 246 190 14 246 190 14 246 190 14
39933-246 190 14 246 190 14 246 190 14 241 196 14
39934-190 146 13 18 14 6 2 2 6 2 2 6
39935- 46 46 46 246 246 246 253 253 253 253 253 253
39936-253 253 253 253 253 253 253 253 253 253 253 253
39937-253 253 253 253 253 253 253 253 253 253 253 253
39938-253 253 253 253 253 253 253 253 253 253 253 253
39939-253 253 253 221 221 221 86 86 86 156 107 11
39940-216 158 10 236 178 12 242 186 14 246 186 14
39941-242 186 14 239 182 13 239 182 13 242 186 14
39942-242 186 14 246 186 14 246 190 14 246 190 14
39943-246 190 14 246 190 14 246 190 14 246 190 14
39944-242 186 14 225 175 15 142 122 72 66 66 66
39945- 30 30 30 10 10 10 0 0 0 0 0 0
39946- 0 0 0 0 0 0 0 0 0 0 0 0
39947- 0 0 0 0 0 0 0 0 0 0 0 0
39948- 0 0 0 0 0 0 0 0 0 6 6 6
39949- 26 26 26 70 70 70 163 133 67 210 150 10
39950-236 178 12 246 190 14 246 190 14 246 190 14
39951-246 190 14 246 190 14 246 190 14 246 190 14
39952-246 190 14 246 190 14 246 190 14 246 190 14
39953-246 190 14 246 190 14 246 190 14 246 190 14
39954-232 195 16 121 92 8 34 34 34 106 106 106
39955-221 221 221 253 253 253 253 253 253 253 253 253
39956-253 253 253 253 253 253 253 253 253 253 253 253
39957-253 253 253 253 253 253 253 253 253 253 253 253
39958-253 253 253 253 253 253 253 253 253 253 253 253
39959-242 242 242 82 82 82 18 14 6 163 110 8
39960-216 158 10 236 178 12 242 186 14 246 190 14
39961-246 190 14 246 190 14 246 190 14 246 190 14
39962-246 190 14 246 190 14 246 190 14 246 190 14
39963-246 190 14 246 190 14 246 190 14 246 190 14
39964-246 190 14 246 190 14 242 186 14 163 133 67
39965- 46 46 46 18 18 18 6 6 6 0 0 0
39966- 0 0 0 0 0 0 0 0 0 0 0 0
39967- 0 0 0 0 0 0 0 0 0 0 0 0
39968- 0 0 0 0 0 0 0 0 0 10 10 10
39969- 30 30 30 78 78 78 163 133 67 210 150 10
39970-236 178 12 246 186 14 246 190 14 246 190 14
39971-246 190 14 246 190 14 246 190 14 246 190 14
39972-246 190 14 246 190 14 246 190 14 246 190 14
39973-246 190 14 246 190 14 246 190 14 246 190 14
39974-241 196 14 215 174 15 190 178 144 253 253 253
39975-253 253 253 253 253 253 253 253 253 253 253 253
39976-253 253 253 253 253 253 253 253 253 253 253 253
39977-253 253 253 253 253 253 253 253 253 253 253 253
39978-253 253 253 253 253 253 253 253 253 218 218 218
39979- 58 58 58 2 2 6 22 18 6 167 114 7
39980-216 158 10 236 178 12 246 186 14 246 190 14
39981-246 190 14 246 190 14 246 190 14 246 190 14
39982-246 190 14 246 190 14 246 190 14 246 190 14
39983-246 190 14 246 190 14 246 190 14 246 190 14
39984-246 190 14 246 186 14 242 186 14 190 150 46
39985- 54 54 54 22 22 22 6 6 6 0 0 0
39986- 0 0 0 0 0 0 0 0 0 0 0 0
39987- 0 0 0 0 0 0 0 0 0 0 0 0
39988- 0 0 0 0 0 0 0 0 0 14 14 14
39989- 38 38 38 86 86 86 180 133 36 213 154 11
39990-236 178 12 246 186 14 246 190 14 246 190 14
39991-246 190 14 246 190 14 246 190 14 246 190 14
39992-246 190 14 246 190 14 246 190 14 246 190 14
39993-246 190 14 246 190 14 246 190 14 246 190 14
39994-246 190 14 232 195 16 190 146 13 214 214 214
39995-253 253 253 253 253 253 253 253 253 253 253 253
39996-253 253 253 253 253 253 253 253 253 253 253 253
39997-253 253 253 253 253 253 253 253 253 253 253 253
39998-253 253 253 250 250 250 170 170 170 26 26 26
39999- 2 2 6 2 2 6 37 26 9 163 110 8
40000-219 162 10 239 182 13 246 186 14 246 190 14
40001-246 190 14 246 190 14 246 190 14 246 190 14
40002-246 190 14 246 190 14 246 190 14 246 190 14
40003-246 190 14 246 190 14 246 190 14 246 190 14
40004-246 186 14 236 178 12 224 166 10 142 122 72
40005- 46 46 46 18 18 18 6 6 6 0 0 0
40006- 0 0 0 0 0 0 0 0 0 0 0 0
40007- 0 0 0 0 0 0 0 0 0 0 0 0
40008- 0 0 0 0 0 0 6 6 6 18 18 18
40009- 50 50 50 109 106 95 192 133 9 224 166 10
40010-242 186 14 246 190 14 246 190 14 246 190 14
40011-246 190 14 246 190 14 246 190 14 246 190 14
40012-246 190 14 246 190 14 246 190 14 246 190 14
40013-246 190 14 246 190 14 246 190 14 246 190 14
40014-242 186 14 226 184 13 210 162 10 142 110 46
40015-226 226 226 253 253 253 253 253 253 253 253 253
40016-253 253 253 253 253 253 253 253 253 253 253 253
40017-253 253 253 253 253 253 253 253 253 253 253 253
40018-198 198 198 66 66 66 2 2 6 2 2 6
40019- 2 2 6 2 2 6 50 34 6 156 107 11
40020-219 162 10 239 182 13 246 186 14 246 190 14
40021-246 190 14 246 190 14 246 190 14 246 190 14
40022-246 190 14 246 190 14 246 190 14 246 190 14
40023-246 190 14 246 190 14 246 190 14 242 186 14
40024-234 174 13 213 154 11 154 122 46 66 66 66
40025- 30 30 30 10 10 10 0 0 0 0 0 0
40026- 0 0 0 0 0 0 0 0 0 0 0 0
40027- 0 0 0 0 0 0 0 0 0 0 0 0
40028- 0 0 0 0 0 0 6 6 6 22 22 22
40029- 58 58 58 154 121 60 206 145 10 234 174 13
40030-242 186 14 246 186 14 246 190 14 246 190 14
40031-246 190 14 246 190 14 246 190 14 246 190 14
40032-246 190 14 246 190 14 246 190 14 246 190 14
40033-246 190 14 246 190 14 246 190 14 246 190 14
40034-246 186 14 236 178 12 210 162 10 163 110 8
40035- 61 42 6 138 138 138 218 218 218 250 250 250
40036-253 253 253 253 253 253 253 253 253 250 250 250
40037-242 242 242 210 210 210 144 144 144 66 66 66
40038- 6 6 6 2 2 6 2 2 6 2 2 6
40039- 2 2 6 2 2 6 61 42 6 163 110 8
40040-216 158 10 236 178 12 246 190 14 246 190 14
40041-246 190 14 246 190 14 246 190 14 246 190 14
40042-246 190 14 246 190 14 246 190 14 246 190 14
40043-246 190 14 239 182 13 230 174 11 216 158 10
40044-190 142 34 124 112 88 70 70 70 38 38 38
40045- 18 18 18 6 6 6 0 0 0 0 0 0
40046- 0 0 0 0 0 0 0 0 0 0 0 0
40047- 0 0 0 0 0 0 0 0 0 0 0 0
40048- 0 0 0 0 0 0 6 6 6 22 22 22
40049- 62 62 62 168 124 44 206 145 10 224 166 10
40050-236 178 12 239 182 13 242 186 14 242 186 14
40051-246 186 14 246 190 14 246 190 14 246 190 14
40052-246 190 14 246 190 14 246 190 14 246 190 14
40053-246 190 14 246 190 14 246 190 14 246 190 14
40054-246 190 14 236 178 12 216 158 10 175 118 6
40055- 80 54 7 2 2 6 6 6 6 30 30 30
40056- 54 54 54 62 62 62 50 50 50 38 38 38
40057- 14 14 14 2 2 6 2 2 6 2 2 6
40058- 2 2 6 2 2 6 2 2 6 2 2 6
40059- 2 2 6 6 6 6 80 54 7 167 114 7
40060-213 154 11 236 178 12 246 190 14 246 190 14
40061-246 190 14 246 190 14 246 190 14 246 190 14
40062-246 190 14 242 186 14 239 182 13 239 182 13
40063-230 174 11 210 150 10 174 135 50 124 112 88
40064- 82 82 82 54 54 54 34 34 34 18 18 18
40065- 6 6 6 0 0 0 0 0 0 0 0 0
40066- 0 0 0 0 0 0 0 0 0 0 0 0
40067- 0 0 0 0 0 0 0 0 0 0 0 0
40068- 0 0 0 0 0 0 6 6 6 18 18 18
40069- 50 50 50 158 118 36 192 133 9 200 144 11
40070-216 158 10 219 162 10 224 166 10 226 170 11
40071-230 174 11 236 178 12 239 182 13 239 182 13
40072-242 186 14 246 186 14 246 190 14 246 190 14
40073-246 190 14 246 190 14 246 190 14 246 190 14
40074-246 186 14 230 174 11 210 150 10 163 110 8
40075-104 69 6 10 10 10 2 2 6 2 2 6
40076- 2 2 6 2 2 6 2 2 6 2 2 6
40077- 2 2 6 2 2 6 2 2 6 2 2 6
40078- 2 2 6 2 2 6 2 2 6 2 2 6
40079- 2 2 6 6 6 6 91 60 6 167 114 7
40080-206 145 10 230 174 11 242 186 14 246 190 14
40081-246 190 14 246 190 14 246 186 14 242 186 14
40082-239 182 13 230 174 11 224 166 10 213 154 11
40083-180 133 36 124 112 88 86 86 86 58 58 58
40084- 38 38 38 22 22 22 10 10 10 6 6 6
40085- 0 0 0 0 0 0 0 0 0 0 0 0
40086- 0 0 0 0 0 0 0 0 0 0 0 0
40087- 0 0 0 0 0 0 0 0 0 0 0 0
40088- 0 0 0 0 0 0 0 0 0 14 14 14
40089- 34 34 34 70 70 70 138 110 50 158 118 36
40090-167 114 7 180 123 7 192 133 9 197 138 11
40091-200 144 11 206 145 10 213 154 11 219 162 10
40092-224 166 10 230 174 11 239 182 13 242 186 14
40093-246 186 14 246 186 14 246 186 14 246 186 14
40094-239 182 13 216 158 10 185 133 11 152 99 6
40095-104 69 6 18 14 6 2 2 6 2 2 6
40096- 2 2 6 2 2 6 2 2 6 2 2 6
40097- 2 2 6 2 2 6 2 2 6 2 2 6
40098- 2 2 6 2 2 6 2 2 6 2 2 6
40099- 2 2 6 6 6 6 80 54 7 152 99 6
40100-192 133 9 219 162 10 236 178 12 239 182 13
40101-246 186 14 242 186 14 239 182 13 236 178 12
40102-224 166 10 206 145 10 192 133 9 154 121 60
40103- 94 94 94 62 62 62 42 42 42 22 22 22
40104- 14 14 14 6 6 6 0 0 0 0 0 0
40105- 0 0 0 0 0 0 0 0 0 0 0 0
40106- 0 0 0 0 0 0 0 0 0 0 0 0
40107- 0 0 0 0 0 0 0 0 0 0 0 0
40108- 0 0 0 0 0 0 0 0 0 6 6 6
40109- 18 18 18 34 34 34 58 58 58 78 78 78
40110-101 98 89 124 112 88 142 110 46 156 107 11
40111-163 110 8 167 114 7 175 118 6 180 123 7
40112-185 133 11 197 138 11 210 150 10 219 162 10
40113-226 170 11 236 178 12 236 178 12 234 174 13
40114-219 162 10 197 138 11 163 110 8 130 83 6
40115- 91 60 6 10 10 10 2 2 6 2 2 6
40116- 18 18 18 38 38 38 38 38 38 38 38 38
40117- 38 38 38 38 38 38 38 38 38 38 38 38
40118- 38 38 38 38 38 38 26 26 26 2 2 6
40119- 2 2 6 6 6 6 70 47 6 137 92 6
40120-175 118 6 200 144 11 219 162 10 230 174 11
40121-234 174 13 230 174 11 219 162 10 210 150 10
40122-192 133 9 163 110 8 124 112 88 82 82 82
40123- 50 50 50 30 30 30 14 14 14 6 6 6
40124- 0 0 0 0 0 0 0 0 0 0 0 0
40125- 0 0 0 0 0 0 0 0 0 0 0 0
40126- 0 0 0 0 0 0 0 0 0 0 0 0
40127- 0 0 0 0 0 0 0 0 0 0 0 0
40128- 0 0 0 0 0 0 0 0 0 0 0 0
40129- 6 6 6 14 14 14 22 22 22 34 34 34
40130- 42 42 42 58 58 58 74 74 74 86 86 86
40131-101 98 89 122 102 70 130 98 46 121 87 25
40132-137 92 6 152 99 6 163 110 8 180 123 7
40133-185 133 11 197 138 11 206 145 10 200 144 11
40134-180 123 7 156 107 11 130 83 6 104 69 6
40135- 50 34 6 54 54 54 110 110 110 101 98 89
40136- 86 86 86 82 82 82 78 78 78 78 78 78
40137- 78 78 78 78 78 78 78 78 78 78 78 78
40138- 78 78 78 82 82 82 86 86 86 94 94 94
40139-106 106 106 101 101 101 86 66 34 124 80 6
40140-156 107 11 180 123 7 192 133 9 200 144 11
40141-206 145 10 200 144 11 192 133 9 175 118 6
40142-139 102 15 109 106 95 70 70 70 42 42 42
40143- 22 22 22 10 10 10 0 0 0 0 0 0
40144- 0 0 0 0 0 0 0 0 0 0 0 0
40145- 0 0 0 0 0 0 0 0 0 0 0 0
40146- 0 0 0 0 0 0 0 0 0 0 0 0
40147- 0 0 0 0 0 0 0 0 0 0 0 0
40148- 0 0 0 0 0 0 0 0 0 0 0 0
40149- 0 0 0 0 0 0 6 6 6 10 10 10
40150- 14 14 14 22 22 22 30 30 30 38 38 38
40151- 50 50 50 62 62 62 74 74 74 90 90 90
40152-101 98 89 112 100 78 121 87 25 124 80 6
40153-137 92 6 152 99 6 152 99 6 152 99 6
40154-138 86 6 124 80 6 98 70 6 86 66 30
40155-101 98 89 82 82 82 58 58 58 46 46 46
40156- 38 38 38 34 34 34 34 34 34 34 34 34
40157- 34 34 34 34 34 34 34 34 34 34 34 34
40158- 34 34 34 34 34 34 38 38 38 42 42 42
40159- 54 54 54 82 82 82 94 86 76 91 60 6
40160-134 86 6 156 107 11 167 114 7 175 118 6
40161-175 118 6 167 114 7 152 99 6 121 87 25
40162-101 98 89 62 62 62 34 34 34 18 18 18
40163- 6 6 6 0 0 0 0 0 0 0 0 0
40164- 0 0 0 0 0 0 0 0 0 0 0 0
40165- 0 0 0 0 0 0 0 0 0 0 0 0
40166- 0 0 0 0 0 0 0 0 0 0 0 0
40167- 0 0 0 0 0 0 0 0 0 0 0 0
40168- 0 0 0 0 0 0 0 0 0 0 0 0
40169- 0 0 0 0 0 0 0 0 0 0 0 0
40170- 0 0 0 6 6 6 6 6 6 10 10 10
40171- 18 18 18 22 22 22 30 30 30 42 42 42
40172- 50 50 50 66 66 66 86 86 86 101 98 89
40173-106 86 58 98 70 6 104 69 6 104 69 6
40174-104 69 6 91 60 6 82 62 34 90 90 90
40175- 62 62 62 38 38 38 22 22 22 14 14 14
40176- 10 10 10 10 10 10 10 10 10 10 10 10
40177- 10 10 10 10 10 10 6 6 6 10 10 10
40178- 10 10 10 10 10 10 10 10 10 14 14 14
40179- 22 22 22 42 42 42 70 70 70 89 81 66
40180- 80 54 7 104 69 6 124 80 6 137 92 6
40181-134 86 6 116 81 8 100 82 52 86 86 86
40182- 58 58 58 30 30 30 14 14 14 6 6 6
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 0 0 0 0 0 0 0 0 0 0 0 0
40185- 0 0 0 0 0 0 0 0 0 0 0 0
40186- 0 0 0 0 0 0 0 0 0 0 0 0
40187- 0 0 0 0 0 0 0 0 0 0 0 0
40188- 0 0 0 0 0 0 0 0 0 0 0 0
40189- 0 0 0 0 0 0 0 0 0 0 0 0
40190- 0 0 0 0 0 0 0 0 0 0 0 0
40191- 0 0 0 6 6 6 10 10 10 14 14 14
40192- 18 18 18 26 26 26 38 38 38 54 54 54
40193- 70 70 70 86 86 86 94 86 76 89 81 66
40194- 89 81 66 86 86 86 74 74 74 50 50 50
40195- 30 30 30 14 14 14 6 6 6 0 0 0
40196- 0 0 0 0 0 0 0 0 0 0 0 0
40197- 0 0 0 0 0 0 0 0 0 0 0 0
40198- 0 0 0 0 0 0 0 0 0 0 0 0
40199- 6 6 6 18 18 18 34 34 34 58 58 58
40200- 82 82 82 89 81 66 89 81 66 89 81 66
40201- 94 86 66 94 86 76 74 74 74 50 50 50
40202- 26 26 26 14 14 14 6 6 6 0 0 0
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 0 0 0 0 0 0 0 0 0 0 0 0
40205- 0 0 0 0 0 0 0 0 0 0 0 0
40206- 0 0 0 0 0 0 0 0 0 0 0 0
40207- 0 0 0 0 0 0 0 0 0 0 0 0
40208- 0 0 0 0 0 0 0 0 0 0 0 0
40209- 0 0 0 0 0 0 0 0 0 0 0 0
40210- 0 0 0 0 0 0 0 0 0 0 0 0
40211- 0 0 0 0 0 0 0 0 0 0 0 0
40212- 6 6 6 6 6 6 14 14 14 18 18 18
40213- 30 30 30 38 38 38 46 46 46 54 54 54
40214- 50 50 50 42 42 42 30 30 30 18 18 18
40215- 10 10 10 0 0 0 0 0 0 0 0 0
40216- 0 0 0 0 0 0 0 0 0 0 0 0
40217- 0 0 0 0 0 0 0 0 0 0 0 0
40218- 0 0 0 0 0 0 0 0 0 0 0 0
40219- 0 0 0 6 6 6 14 14 14 26 26 26
40220- 38 38 38 50 50 50 58 58 58 58 58 58
40221- 54 54 54 42 42 42 30 30 30 18 18 18
40222- 10 10 10 0 0 0 0 0 0 0 0 0
40223- 0 0 0 0 0 0 0 0 0 0 0 0
40224- 0 0 0 0 0 0 0 0 0 0 0 0
40225- 0 0 0 0 0 0 0 0 0 0 0 0
40226- 0 0 0 0 0 0 0 0 0 0 0 0
40227- 0 0 0 0 0 0 0 0 0 0 0 0
40228- 0 0 0 0 0 0 0 0 0 0 0 0
40229- 0 0 0 0 0 0 0 0 0 0 0 0
40230- 0 0 0 0 0 0 0 0 0 0 0 0
40231- 0 0 0 0 0 0 0 0 0 0 0 0
40232- 0 0 0 0 0 0 0 0 0 6 6 6
40233- 6 6 6 10 10 10 14 14 14 18 18 18
40234- 18 18 18 14 14 14 10 10 10 6 6 6
40235- 0 0 0 0 0 0 0 0 0 0 0 0
40236- 0 0 0 0 0 0 0 0 0 0 0 0
40237- 0 0 0 0 0 0 0 0 0 0 0 0
40238- 0 0 0 0 0 0 0 0 0 0 0 0
40239- 0 0 0 0 0 0 0 0 0 6 6 6
40240- 14 14 14 18 18 18 22 22 22 22 22 22
40241- 18 18 18 14 14 14 10 10 10 6 6 6
40242- 0 0 0 0 0 0 0 0 0 0 0 0
40243- 0 0 0 0 0 0 0 0 0 0 0 0
40244- 0 0 0 0 0 0 0 0 0 0 0 0
40245- 0 0 0 0 0 0 0 0 0 0 0 0
40246- 0 0 0 0 0 0 0 0 0 0 0 0
40247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260+4 4 4 4 4 4
40261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274+4 4 4 4 4 4
40275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288+4 4 4 4 4 4
40289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302+4 4 4 4 4 4
40303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316+4 4 4 4 4 4
40317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330+4 4 4 4 4 4
40331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40336+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40341+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40342+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344+4 4 4 4 4 4
40345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40350+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40351+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40355+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40356+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40357+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358+4 4 4 4 4 4
40359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40364+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40365+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40369+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40370+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40371+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40372+4 4 4 4 4 4
40373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40377+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40378+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40379+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40382+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40383+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40384+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40385+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40386+4 4 4 4 4 4
40387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40391+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40392+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40393+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40394+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40395+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40396+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40397+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40398+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40399+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40400+4 4 4 4 4 4
40401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40404+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40405+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40406+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40407+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40408+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40409+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40410+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40411+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40412+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40413+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40414+4 4 4 4 4 4
40415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40418+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40419+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40420+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40421+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40422+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40423+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40424+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40425+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40426+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40427+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40428+4 4 4 4 4 4
40429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40432+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40433+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40434+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40435+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40436+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40437+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40438+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40439+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40440+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40441+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40442+4 4 4 4 4 4
40443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40446+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40447+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40448+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40449+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40450+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40451+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40452+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40453+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40454+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40455+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40456+4 4 4 4 4 4
40457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40460+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40461+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40462+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40463+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40464+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40465+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40466+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40467+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40468+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40469+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40470+4 4 4 4 4 4
40471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40473+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40474+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40475+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40476+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40477+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40478+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40479+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40480+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40481+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40482+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40483+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40484+4 4 4 4 4 4
40485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40487+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40488+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40489+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40490+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40491+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40492+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40493+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40494+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40495+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40496+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40497+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40498+0 0 0 4 4 4
40499+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40500+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40501+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40502+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40503+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40504+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40505+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40506+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40507+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40508+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40509+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40510+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40511+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40512+2 0 0 0 0 0
40513+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40514+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40515+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40516+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40517+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40518+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40519+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40520+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40521+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40522+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40523+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40524+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40525+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40526+37 38 37 0 0 0
40527+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40528+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40529+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40530+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40531+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40532+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40533+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40534+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40535+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40536+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40537+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40538+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40539+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40540+85 115 134 4 0 0
40541+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40542+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40543+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40544+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40545+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40546+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40547+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40548+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40549+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40550+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40551+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40552+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40553+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40554+60 73 81 4 0 0
40555+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40556+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40557+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40558+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40559+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40560+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40561+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40562+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40563+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40564+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40565+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40566+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40567+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40568+16 19 21 4 0 0
40569+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40570+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40571+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40572+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40573+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40574+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40575+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40576+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40577+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40578+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40579+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40580+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40581+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40582+4 0 0 4 3 3
40583+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40584+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40585+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40587+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40588+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40589+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40590+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40591+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40592+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40593+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40594+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40595+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40596+3 2 2 4 4 4
40597+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40598+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40599+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40600+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40601+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40602+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40603+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40604+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40605+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40606+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40607+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40608+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40609+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40610+4 4 4 4 4 4
40611+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40612+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40613+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40614+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40615+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40616+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40617+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40618+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40619+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40620+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40621+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40622+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40623+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40624+4 4 4 4 4 4
40625+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40626+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40627+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40628+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40629+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40630+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40631+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40632+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40633+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40634+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40635+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40636+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40637+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40638+5 5 5 5 5 5
40639+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40640+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40641+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40642+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40643+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40644+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40645+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40646+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40647+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40648+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40649+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40650+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40651+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40652+5 5 5 4 4 4
40653+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40654+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40655+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40656+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40657+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40658+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40659+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40660+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40661+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40662+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40663+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40664+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666+4 4 4 4 4 4
40667+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40668+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40669+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40670+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40671+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40672+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40673+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40674+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40675+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40676+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40677+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40678+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40680+4 4 4 4 4 4
40681+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40682+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40683+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40684+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40685+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40686+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40687+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40688+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40689+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40690+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40691+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40694+4 4 4 4 4 4
40695+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40696+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40697+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40698+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40699+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40700+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40701+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40702+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40703+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40704+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40705+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40708+4 4 4 4 4 4
40709+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40710+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40711+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40712+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40713+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40714+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40715+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40716+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40717+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40718+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40719+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40722+4 4 4 4 4 4
40723+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40724+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40725+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40726+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40727+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40728+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40729+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40730+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40731+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40732+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40733+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40736+4 4 4 4 4 4
40737+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40738+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40739+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40740+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40741+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40742+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40743+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40744+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40745+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40746+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40747+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40750+4 4 4 4 4 4
40751+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40752+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40753+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40754+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40755+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40756+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40757+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40758+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40759+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40760+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40761+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40764+4 4 4 4 4 4
40765+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40766+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40767+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40768+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40769+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40770+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40771+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40772+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40773+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40774+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40775+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40778+4 4 4 4 4 4
40779+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40780+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40781+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40782+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40783+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40784+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40785+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40786+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40787+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40788+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40789+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40792+4 4 4 4 4 4
40793+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40794+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40795+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40796+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40797+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40798+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40799+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40800+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40801+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40802+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40803+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40806+4 4 4 4 4 4
40807+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40808+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40809+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40810+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40811+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40812+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40813+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40814+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40815+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40816+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40817+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40820+4 4 4 4 4 4
40821+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40822+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40823+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40824+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40825+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40826+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40827+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40828+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40829+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40830+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40831+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834+4 4 4 4 4 4
40835+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40836+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40837+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40838+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40839+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40840+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40841+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40842+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40843+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40844+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40845+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848+4 4 4 4 4 4
40849+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40850+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40851+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40852+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40853+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40854+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40855+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40856+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40857+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40858+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40859+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862+4 4 4 4 4 4
40863+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40864+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40865+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40866+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40867+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40868+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40869+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40870+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40871+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40872+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40873+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876+4 4 4 4 4 4
40877+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40878+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40879+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40880+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40881+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40882+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40883+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40884+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40885+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40886+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40887+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890+4 4 4 4 4 4
40891+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40892+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40893+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40894+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40895+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40896+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40897+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40898+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40899+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40900+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40901+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904+4 4 4 4 4 4
40905+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40906+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40907+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40908+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40909+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40910+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40911+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40912+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40913+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40914+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40915+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918+4 4 4 4 4 4
40919+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40920+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40921+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40922+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40923+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40924+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40925+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40926+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40927+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40928+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40929+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932+4 4 4 4 4 4
40933+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40934+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40935+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40936+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40937+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40938+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40939+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40940+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40941+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40942+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40943+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946+4 4 4 4 4 4
40947+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40948+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40949+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40950+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40951+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40952+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40953+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40954+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40955+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40956+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40957+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40960+4 4 4 4 4 4
40961+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40962+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40963+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40964+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40965+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40966+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40967+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40968+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40969+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40970+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40971+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974+4 4 4 4 4 4
40975+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40976+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40977+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40978+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40979+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40980+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40981+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40982+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40983+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40984+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40985+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988+4 4 4 4 4 4
40989+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40990+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40991+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40992+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40993+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40994+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40995+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40996+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40997+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40998+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40999+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002+4 4 4 4 4 4
41003+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41004+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41005+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41006+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41007+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41008+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41009+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41010+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41011+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41012+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41013+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41016+4 4 4 4 4 4
41017+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41018+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41019+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41020+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41021+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41022+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41023+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41024+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41025+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41026+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41027+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41030+4 4 4 4 4 4
41031+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41032+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41033+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41034+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41035+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41036+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41037+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41038+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41039+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41040+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41041+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044+4 4 4 4 4 4
41045+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41046+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41047+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41048+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41049+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41050+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41051+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41052+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41053+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41054+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41055+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058+4 4 4 4 4 4
41059+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41060+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41061+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41062+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41063+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41064+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41065+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41066+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41067+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41068+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072+4 4 4 4 4 4
41073+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41074+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41075+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41076+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41077+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41078+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41079+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41080+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41081+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41082+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086+4 4 4 4 4 4
41087+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41088+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41089+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41090+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41091+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41092+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41093+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41094+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41095+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41096+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41100+4 4 4 4 4 4
41101+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41102+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41103+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41104+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41105+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41106+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41107+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41108+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41109+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41110+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41114+4 4 4 4 4 4
41115+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41116+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41117+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41118+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41119+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41120+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41121+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41122+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41123+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41128+4 4 4 4 4 4
41129+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41130+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41131+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41132+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41133+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41134+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41135+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41136+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41137+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41142+4 4 4 4 4 4
41143+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41144+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41145+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41146+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41147+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41148+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41149+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41150+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41151+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41156+4 4 4 4 4 4
41157+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41158+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41159+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41160+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41161+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41162+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41163+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41164+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41170+4 4 4 4 4 4
41171+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41172+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41173+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41174+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41175+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41176+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41177+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41178+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41184+4 4 4 4 4 4
41185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41186+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41187+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41188+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41189+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41190+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41191+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41192+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41198+4 4 4 4 4 4
41199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41200+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41201+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41202+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41203+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41204+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41205+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41206+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41212+4 4 4 4 4 4
41213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41214+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41215+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41216+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41217+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41218+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41219+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41220+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41226+4 4 4 4 4 4
41227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41230+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41231+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41232+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41233+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41234+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41240+4 4 4 4 4 4
41241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41244+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41245+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41246+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41247+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41254+4 4 4 4 4 4
41255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41259+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41260+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41261+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268+4 4 4 4 4 4
41269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41273+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41274+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41275+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282+4 4 4 4 4 4
41283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41287+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41288+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41289+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296+4 4 4 4 4 4
41297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41301+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41302+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41303+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310+4 4 4 4 4 4
41311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41316+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41317+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324+4 4 4 4 4 4
41325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41330+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41331+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338+4 4 4 4 4 4
41339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41343+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41344+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352+4 4 4 4 4 4
41353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41357+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41358+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366+4 4 4 4 4 4
41367diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41368index a159b63..4ab532d 100644
41369--- a/drivers/video/udlfb.c
41370+++ b/drivers/video/udlfb.c
41371@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41372 dlfb_urb_completion(urb);
41373
41374 error:
41375- atomic_add(bytes_sent, &dev->bytes_sent);
41376- atomic_add(bytes_identical, &dev->bytes_identical);
41377- atomic_add(width*height*2, &dev->bytes_rendered);
41378+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41379+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41380+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41381 end_cycles = get_cycles();
41382- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41383+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41384 >> 10)), /* Kcycles */
41385 &dev->cpu_kcycles_used);
41386
41387@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41388 dlfb_urb_completion(urb);
41389
41390 error:
41391- atomic_add(bytes_sent, &dev->bytes_sent);
41392- atomic_add(bytes_identical, &dev->bytes_identical);
41393- atomic_add(bytes_rendered, &dev->bytes_rendered);
41394+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41395+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41396+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41397 end_cycles = get_cycles();
41398- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41399+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41400 >> 10)), /* Kcycles */
41401 &dev->cpu_kcycles_used);
41402 }
41403@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41404 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41405 struct dlfb_data *dev = fb_info->par;
41406 return snprintf(buf, PAGE_SIZE, "%u\n",
41407- atomic_read(&dev->bytes_rendered));
41408+ atomic_read_unchecked(&dev->bytes_rendered));
41409 }
41410
41411 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41412@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41413 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41414 struct dlfb_data *dev = fb_info->par;
41415 return snprintf(buf, PAGE_SIZE, "%u\n",
41416- atomic_read(&dev->bytes_identical));
41417+ atomic_read_unchecked(&dev->bytes_identical));
41418 }
41419
41420 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41421@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41422 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41423 struct dlfb_data *dev = fb_info->par;
41424 return snprintf(buf, PAGE_SIZE, "%u\n",
41425- atomic_read(&dev->bytes_sent));
41426+ atomic_read_unchecked(&dev->bytes_sent));
41427 }
41428
41429 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41430@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41431 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41432 struct dlfb_data *dev = fb_info->par;
41433 return snprintf(buf, PAGE_SIZE, "%u\n",
41434- atomic_read(&dev->cpu_kcycles_used));
41435+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41436 }
41437
41438 static ssize_t edid_show(
41439@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41440 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41441 struct dlfb_data *dev = fb_info->par;
41442
41443- atomic_set(&dev->bytes_rendered, 0);
41444- atomic_set(&dev->bytes_identical, 0);
41445- atomic_set(&dev->bytes_sent, 0);
41446- atomic_set(&dev->cpu_kcycles_used, 0);
41447+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41448+ atomic_set_unchecked(&dev->bytes_identical, 0);
41449+ atomic_set_unchecked(&dev->bytes_sent, 0);
41450+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41451
41452 return count;
41453 }
41454diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41455index b0e2a42..e2df3ad 100644
41456--- a/drivers/video/uvesafb.c
41457+++ b/drivers/video/uvesafb.c
41458@@ -19,6 +19,7 @@
41459 #include <linux/io.h>
41460 #include <linux/mutex.h>
41461 #include <linux/slab.h>
41462+#include <linux/moduleloader.h>
41463 #include <video/edid.h>
41464 #include <video/uvesafb.h>
41465 #ifdef CONFIG_X86
41466@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41467 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41468 par->pmi_setpal = par->ypan = 0;
41469 } else {
41470+
41471+#ifdef CONFIG_PAX_KERNEXEC
41472+#ifdef CONFIG_MODULES
41473+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41474+#endif
41475+ if (!par->pmi_code) {
41476+ par->pmi_setpal = par->ypan = 0;
41477+ return 0;
41478+ }
41479+#endif
41480+
41481 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41482 + task->t.regs.edi);
41483+
41484+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41485+ pax_open_kernel();
41486+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41487+ pax_close_kernel();
41488+
41489+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41490+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41491+#else
41492 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41493 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41494+#endif
41495+
41496 printk(KERN_INFO "uvesafb: protected mode interface info at "
41497 "%04x:%04x\n",
41498 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41499@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41500 par->ypan = ypan;
41501
41502 if (par->pmi_setpal || par->ypan) {
41503+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41504 if (__supported_pte_mask & _PAGE_NX) {
41505 par->pmi_setpal = par->ypan = 0;
41506 printk(KERN_WARNING "uvesafb: NX protection is actively."
41507 "We have better not to use the PMI.\n");
41508- } else {
41509+ } else
41510+#endif
41511 uvesafb_vbe_getpmi(task, par);
41512- }
41513 }
41514 #else
41515 /* The protected mode interface is not available on non-x86. */
41516@@ -1836,6 +1860,11 @@ out:
41517 if (par->vbe_modes)
41518 kfree(par->vbe_modes);
41519
41520+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41521+ if (par->pmi_code)
41522+ module_free_exec(NULL, par->pmi_code);
41523+#endif
41524+
41525 framebuffer_release(info);
41526 return err;
41527 }
41528@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41529 kfree(par->vbe_state_orig);
41530 if (par->vbe_state_saved)
41531 kfree(par->vbe_state_saved);
41532+
41533+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41534+ if (par->pmi_code)
41535+ module_free_exec(NULL, par->pmi_code);
41536+#endif
41537+
41538 }
41539
41540 framebuffer_release(info);
41541diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41542index 501b340..86bd4cf 100644
41543--- a/drivers/video/vesafb.c
41544+++ b/drivers/video/vesafb.c
41545@@ -9,6 +9,7 @@
41546 */
41547
41548 #include <linux/module.h>
41549+#include <linux/moduleloader.h>
41550 #include <linux/kernel.h>
41551 #include <linux/errno.h>
41552 #include <linux/string.h>
41553@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41554 static int vram_total __initdata; /* Set total amount of memory */
41555 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41556 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41557-static void (*pmi_start)(void) __read_mostly;
41558-static void (*pmi_pal) (void) __read_mostly;
41559+static void (*pmi_start)(void) __read_only;
41560+static void (*pmi_pal) (void) __read_only;
41561 static int depth __read_mostly;
41562 static int vga_compat __read_mostly;
41563 /* --------------------------------------------------------------------- */
41564@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41565 unsigned int size_vmode;
41566 unsigned int size_remap;
41567 unsigned int size_total;
41568+ void *pmi_code = NULL;
41569
41570 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41571 return -ENODEV;
41572@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41573 size_remap = size_total;
41574 vesafb_fix.smem_len = size_remap;
41575
41576-#ifndef __i386__
41577- screen_info.vesapm_seg = 0;
41578-#endif
41579-
41580 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41581 printk(KERN_WARNING
41582 "vesafb: cannot reserve video memory at 0x%lx\n",
41583@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41584 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41585 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41586
41587+#ifdef __i386__
41588+
41589+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41590+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41591+ if (!pmi_code)
41592+#elif !defined(CONFIG_PAX_KERNEXEC)
41593+ if (0)
41594+#endif
41595+
41596+#endif
41597+ screen_info.vesapm_seg = 0;
41598+
41599 if (screen_info.vesapm_seg) {
41600- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41601- screen_info.vesapm_seg,screen_info.vesapm_off);
41602+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41603+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41604 }
41605
41606 if (screen_info.vesapm_seg < 0xc000)
41607@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41608
41609 if (ypan || pmi_setpal) {
41610 unsigned short *pmi_base;
41611+
41612 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41613- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41614- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41615+
41616+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41617+ pax_open_kernel();
41618+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41619+#else
41620+ pmi_code = pmi_base;
41621+#endif
41622+
41623+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41624+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41625+
41626+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41627+ pmi_start = ktva_ktla(pmi_start);
41628+ pmi_pal = ktva_ktla(pmi_pal);
41629+ pax_close_kernel();
41630+#endif
41631+
41632 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41633 if (pmi_base[3]) {
41634 printk(KERN_INFO "vesafb: pmi: ports = ");
41635@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41636 info->node, info->fix.id);
41637 return 0;
41638 err:
41639+
41640+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41641+ module_free_exec(NULL, pmi_code);
41642+#endif
41643+
41644 if (info->screen_base)
41645 iounmap(info->screen_base);
41646 framebuffer_release(info);
41647diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41648index 88714ae..16c2e11 100644
41649--- a/drivers/video/via/via_clock.h
41650+++ b/drivers/video/via/via_clock.h
41651@@ -56,7 +56,7 @@ struct via_clock {
41652
41653 void (*set_engine_pll_state)(u8 state);
41654 void (*set_engine_pll)(struct via_pll_config config);
41655-};
41656+} __no_const;
41657
41658
41659 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41660diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41661index e56c934..fc22f4b 100644
41662--- a/drivers/xen/xen-pciback/conf_space.h
41663+++ b/drivers/xen/xen-pciback/conf_space.h
41664@@ -44,15 +44,15 @@ struct config_field {
41665 struct {
41666 conf_dword_write write;
41667 conf_dword_read read;
41668- } dw;
41669+ } __no_const dw;
41670 struct {
41671 conf_word_write write;
41672 conf_word_read read;
41673- } w;
41674+ } __no_const w;
41675 struct {
41676 conf_byte_write write;
41677 conf_byte_read read;
41678- } b;
41679+ } __no_const b;
41680 } u;
41681 struct list_head list;
41682 };
41683diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41684index 014c8dd..6f3dfe6 100644
41685--- a/fs/9p/vfs_inode.c
41686+++ b/fs/9p/vfs_inode.c
41687@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41688 void
41689 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41690 {
41691- char *s = nd_get_link(nd);
41692+ const char *s = nd_get_link(nd);
41693
41694 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41695 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41696diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41697index e95d1b6..3454244 100644
41698--- a/fs/Kconfig.binfmt
41699+++ b/fs/Kconfig.binfmt
41700@@ -89,7 +89,7 @@ config HAVE_AOUT
41701
41702 config BINFMT_AOUT
41703 tristate "Kernel support for a.out and ECOFF binaries"
41704- depends on HAVE_AOUT
41705+ depends on HAVE_AOUT && BROKEN
41706 ---help---
41707 A.out (Assembler.OUTput) is a set of formats for libraries and
41708 executables used in the earliest versions of UNIX. Linux used
41709diff --git a/fs/aio.c b/fs/aio.c
41710index e7f2fad..15ad8a4 100644
41711--- a/fs/aio.c
41712+++ b/fs/aio.c
41713@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41714 size += sizeof(struct io_event) * nr_events;
41715 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41716
41717- if (nr_pages < 0)
41718+ if (nr_pages <= 0)
41719 return -EINVAL;
41720
41721 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41722@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41723 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41724 {
41725 ssize_t ret;
41726+ struct iovec iovstack;
41727
41728 #ifdef CONFIG_COMPAT
41729 if (compat)
41730 ret = compat_rw_copy_check_uvector(type,
41731 (struct compat_iovec __user *)kiocb->ki_buf,
41732- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41733+ kiocb->ki_nbytes, 1, &iovstack,
41734 &kiocb->ki_iovec, 1);
41735 else
41736 #endif
41737 ret = rw_copy_check_uvector(type,
41738 (struct iovec __user *)kiocb->ki_buf,
41739- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41740+ kiocb->ki_nbytes, 1, &iovstack,
41741 &kiocb->ki_iovec, 1);
41742 if (ret < 0)
41743 goto out;
41744@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41745 if (ret < 0)
41746 goto out;
41747
41748+ if (kiocb->ki_iovec == &iovstack) {
41749+ kiocb->ki_inline_vec = iovstack;
41750+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
41751+ }
41752 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41753 kiocb->ki_cur_seg = 0;
41754 /* ki_nbytes/left now reflect bytes instead of segs */
41755diff --git a/fs/attr.c b/fs/attr.c
41756index d94d1b6..f9bccd6 100644
41757--- a/fs/attr.c
41758+++ b/fs/attr.c
41759@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41760 unsigned long limit;
41761
41762 limit = rlimit(RLIMIT_FSIZE);
41763+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41764 if (limit != RLIM_INFINITY && offset > limit)
41765 goto out_sig;
41766 if (offset > inode->i_sb->s_maxbytes)
41767diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41768index da8876d..9f3e6d8 100644
41769--- a/fs/autofs4/waitq.c
41770+++ b/fs/autofs4/waitq.c
41771@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41772 {
41773 unsigned long sigpipe, flags;
41774 mm_segment_t fs;
41775- const char *data = (const char *)addr;
41776+ const char __user *data = (const char __force_user *)addr;
41777 ssize_t wr = 0;
41778
41779 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41780diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41781index e18da23..affc30e 100644
41782--- a/fs/befs/linuxvfs.c
41783+++ b/fs/befs/linuxvfs.c
41784@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41785 {
41786 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41787 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41788- char *link = nd_get_link(nd);
41789+ const char *link = nd_get_link(nd);
41790 if (!IS_ERR(link))
41791 kfree(link);
41792 }
41793diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41794index d146e18..12d1bd1 100644
41795--- a/fs/binfmt_aout.c
41796+++ b/fs/binfmt_aout.c
41797@@ -16,6 +16,7 @@
41798 #include <linux/string.h>
41799 #include <linux/fs.h>
41800 #include <linux/file.h>
41801+#include <linux/security.h>
41802 #include <linux/stat.h>
41803 #include <linux/fcntl.h>
41804 #include <linux/ptrace.h>
41805@@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41806 #endif
41807 # define START_STACK(u) ((void __user *)u.start_stack)
41808
41809+ memset(&dump, 0, sizeof(dump));
41810+
41811 fs = get_fs();
41812 set_fs(KERNEL_DS);
41813 has_dumped = 1;
41814@@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41815
41816 /* If the size of the dump file exceeds the rlimit, then see what would happen
41817 if we wrote the stack, but not the data area. */
41818+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41819 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41820 dump.u_dsize = 0;
41821
41822 /* Make sure we have enough room to write the stack and data areas. */
41823+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41824 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41825 dump.u_ssize = 0;
41826
41827@@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41828 rlim = rlimit(RLIMIT_DATA);
41829 if (rlim >= RLIM_INFINITY)
41830 rlim = ~0;
41831+
41832+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41833 if (ex.a_data + ex.a_bss > rlim)
41834 return -ENOMEM;
41835
41836@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41837
41838 install_exec_creds(bprm);
41839
41840+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41841+ current->mm->pax_flags = 0UL;
41842+#endif
41843+
41844+#ifdef CONFIG_PAX_PAGEEXEC
41845+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41846+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41847+
41848+#ifdef CONFIG_PAX_EMUTRAMP
41849+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41850+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41851+#endif
41852+
41853+#ifdef CONFIG_PAX_MPROTECT
41854+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41855+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41856+#endif
41857+
41858+ }
41859+#endif
41860+
41861 if (N_MAGIC(ex) == OMAGIC) {
41862 unsigned long text_addr, map_size;
41863 loff_t pos;
41864@@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41865 }
41866
41867 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41868- PROT_READ | PROT_WRITE | PROT_EXEC,
41869+ PROT_READ | PROT_WRITE,
41870 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41871 fd_offset + ex.a_text);
41872 if (error != N_DATADDR(ex)) {
41873diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41874index 16f7354..7cc1e24 100644
41875--- a/fs/binfmt_elf.c
41876+++ b/fs/binfmt_elf.c
41877@@ -32,6 +32,7 @@
41878 #include <linux/elf.h>
41879 #include <linux/utsname.h>
41880 #include <linux/coredump.h>
41881+#include <linux/xattr.h>
41882 #include <asm/uaccess.h>
41883 #include <asm/param.h>
41884 #include <asm/page.h>
41885@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41886 #define elf_core_dump NULL
41887 #endif
41888
41889+#ifdef CONFIG_PAX_MPROTECT
41890+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41891+#endif
41892+
41893 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41894 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41895 #else
41896@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41897 .load_binary = load_elf_binary,
41898 .load_shlib = load_elf_library,
41899 .core_dump = elf_core_dump,
41900+
41901+#ifdef CONFIG_PAX_MPROTECT
41902+ .handle_mprotect= elf_handle_mprotect,
41903+#endif
41904+
41905 .min_coredump = ELF_EXEC_PAGESIZE,
41906 };
41907
41908@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41909
41910 static int set_brk(unsigned long start, unsigned long end)
41911 {
41912+ unsigned long e = end;
41913+
41914 start = ELF_PAGEALIGN(start);
41915 end = ELF_PAGEALIGN(end);
41916 if (end > start) {
41917@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41918 if (BAD_ADDR(addr))
41919 return addr;
41920 }
41921- current->mm->start_brk = current->mm->brk = end;
41922+ current->mm->start_brk = current->mm->brk = e;
41923 return 0;
41924 }
41925
41926@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41927 elf_addr_t __user *u_rand_bytes;
41928 const char *k_platform = ELF_PLATFORM;
41929 const char *k_base_platform = ELF_BASE_PLATFORM;
41930- unsigned char k_rand_bytes[16];
41931+ u32 k_rand_bytes[4];
41932 int items;
41933 elf_addr_t *elf_info;
41934 int ei_index = 0;
41935 const struct cred *cred = current_cred();
41936 struct vm_area_struct *vma;
41937+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41938
41939 /*
41940 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41941@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41942 * Generate 16 random bytes for userspace PRNG seeding.
41943 */
41944 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41945- u_rand_bytes = (elf_addr_t __user *)
41946- STACK_ALLOC(p, sizeof(k_rand_bytes));
41947+ srandom32(k_rand_bytes[0] ^ random32());
41948+ srandom32(k_rand_bytes[1] ^ random32());
41949+ srandom32(k_rand_bytes[2] ^ random32());
41950+ srandom32(k_rand_bytes[3] ^ random32());
41951+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41952+ u_rand_bytes = (elf_addr_t __user *) p;
41953 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41954 return -EFAULT;
41955
41956@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41957 return -EFAULT;
41958 current->mm->env_end = p;
41959
41960+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41961+
41962 /* Put the elf_info on the stack in the right place. */
41963 sp = (elf_addr_t __user *)envp + 1;
41964- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41965+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41966 return -EFAULT;
41967 return 0;
41968 }
41969@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41970 {
41971 struct elf_phdr *elf_phdata;
41972 struct elf_phdr *eppnt;
41973- unsigned long load_addr = 0;
41974+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41975 int load_addr_set = 0;
41976 unsigned long last_bss = 0, elf_bss = 0;
41977- unsigned long error = ~0UL;
41978+ unsigned long error = -EINVAL;
41979 unsigned long total_size;
41980 int retval, i, size;
41981
41982@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41983 goto out_close;
41984 }
41985
41986+#ifdef CONFIG_PAX_SEGMEXEC
41987+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41988+ pax_task_size = SEGMEXEC_TASK_SIZE;
41989+#endif
41990+
41991 eppnt = elf_phdata;
41992 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41993 if (eppnt->p_type == PT_LOAD) {
41994@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41995 k = load_addr + eppnt->p_vaddr;
41996 if (BAD_ADDR(k) ||
41997 eppnt->p_filesz > eppnt->p_memsz ||
41998- eppnt->p_memsz > TASK_SIZE ||
41999- TASK_SIZE - eppnt->p_memsz < k) {
42000+ eppnt->p_memsz > pax_task_size ||
42001+ pax_task_size - eppnt->p_memsz < k) {
42002 error = -ENOMEM;
42003 goto out_close;
42004 }
42005@@ -525,6 +549,311 @@ out:
42006 return error;
42007 }
42008
42009+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42010+#ifdef CONFIG_PAX_SOFTMODE
42011+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42012+{
42013+ unsigned long pax_flags = 0UL;
42014+
42015+#ifdef CONFIG_PAX_PAGEEXEC
42016+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42017+ pax_flags |= MF_PAX_PAGEEXEC;
42018+#endif
42019+
42020+#ifdef CONFIG_PAX_SEGMEXEC
42021+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42022+ pax_flags |= MF_PAX_SEGMEXEC;
42023+#endif
42024+
42025+#ifdef CONFIG_PAX_EMUTRAMP
42026+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42027+ pax_flags |= MF_PAX_EMUTRAMP;
42028+#endif
42029+
42030+#ifdef CONFIG_PAX_MPROTECT
42031+ if (elf_phdata->p_flags & PF_MPROTECT)
42032+ pax_flags |= MF_PAX_MPROTECT;
42033+#endif
42034+
42035+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42036+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42037+ pax_flags |= MF_PAX_RANDMMAP;
42038+#endif
42039+
42040+ return pax_flags;
42041+}
42042+#endif
42043+
42044+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42045+{
42046+ unsigned long pax_flags = 0UL;
42047+
42048+#ifdef CONFIG_PAX_PAGEEXEC
42049+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42050+ pax_flags |= MF_PAX_PAGEEXEC;
42051+#endif
42052+
42053+#ifdef CONFIG_PAX_SEGMEXEC
42054+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42055+ pax_flags |= MF_PAX_SEGMEXEC;
42056+#endif
42057+
42058+#ifdef CONFIG_PAX_EMUTRAMP
42059+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42060+ pax_flags |= MF_PAX_EMUTRAMP;
42061+#endif
42062+
42063+#ifdef CONFIG_PAX_MPROTECT
42064+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42065+ pax_flags |= MF_PAX_MPROTECT;
42066+#endif
42067+
42068+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42069+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42070+ pax_flags |= MF_PAX_RANDMMAP;
42071+#endif
42072+
42073+ return pax_flags;
42074+}
42075+#endif
42076+
42077+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42078+#ifdef CONFIG_PAX_SOFTMODE
42079+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42080+{
42081+ unsigned long pax_flags = 0UL;
42082+
42083+#ifdef CONFIG_PAX_PAGEEXEC
42084+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42085+ pax_flags |= MF_PAX_PAGEEXEC;
42086+#endif
42087+
42088+#ifdef CONFIG_PAX_SEGMEXEC
42089+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42090+ pax_flags |= MF_PAX_SEGMEXEC;
42091+#endif
42092+
42093+#ifdef CONFIG_PAX_EMUTRAMP
42094+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42095+ pax_flags |= MF_PAX_EMUTRAMP;
42096+#endif
42097+
42098+#ifdef CONFIG_PAX_MPROTECT
42099+ if (pax_flags_softmode & MF_PAX_MPROTECT)
42100+ pax_flags |= MF_PAX_MPROTECT;
42101+#endif
42102+
42103+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42104+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42105+ pax_flags |= MF_PAX_RANDMMAP;
42106+#endif
42107+
42108+ return pax_flags;
42109+}
42110+#endif
42111+
42112+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42113+{
42114+ unsigned long pax_flags = 0UL;
42115+
42116+#ifdef CONFIG_PAX_PAGEEXEC
42117+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42118+ pax_flags |= MF_PAX_PAGEEXEC;
42119+#endif
42120+
42121+#ifdef CONFIG_PAX_SEGMEXEC
42122+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42123+ pax_flags |= MF_PAX_SEGMEXEC;
42124+#endif
42125+
42126+#ifdef CONFIG_PAX_EMUTRAMP
42127+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42128+ pax_flags |= MF_PAX_EMUTRAMP;
42129+#endif
42130+
42131+#ifdef CONFIG_PAX_MPROTECT
42132+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42133+ pax_flags |= MF_PAX_MPROTECT;
42134+#endif
42135+
42136+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42137+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42138+ pax_flags |= MF_PAX_RANDMMAP;
42139+#endif
42140+
42141+ return pax_flags;
42142+}
42143+#endif
42144+
42145+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42146+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42147+{
42148+ unsigned long pax_flags = 0UL;
42149+
42150+#ifdef CONFIG_PAX_EI_PAX
42151+
42152+#ifdef CONFIG_PAX_PAGEEXEC
42153+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42154+ pax_flags |= MF_PAX_PAGEEXEC;
42155+#endif
42156+
42157+#ifdef CONFIG_PAX_SEGMEXEC
42158+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42159+ pax_flags |= MF_PAX_SEGMEXEC;
42160+#endif
42161+
42162+#ifdef CONFIG_PAX_EMUTRAMP
42163+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42164+ pax_flags |= MF_PAX_EMUTRAMP;
42165+#endif
42166+
42167+#ifdef CONFIG_PAX_MPROTECT
42168+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42169+ pax_flags |= MF_PAX_MPROTECT;
42170+#endif
42171+
42172+#ifdef CONFIG_PAX_ASLR
42173+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42174+ pax_flags |= MF_PAX_RANDMMAP;
42175+#endif
42176+
42177+#else
42178+
42179+#ifdef CONFIG_PAX_PAGEEXEC
42180+ pax_flags |= MF_PAX_PAGEEXEC;
42181+#endif
42182+
42183+#ifdef CONFIG_PAX_SEGMEXEC
42184+ pax_flags |= MF_PAX_SEGMEXEC;
42185+#endif
42186+
42187+#ifdef CONFIG_PAX_MPROTECT
42188+ pax_flags |= MF_PAX_MPROTECT;
42189+#endif
42190+
42191+#ifdef CONFIG_PAX_RANDMMAP
42192+ if (randomize_va_space)
42193+ pax_flags |= MF_PAX_RANDMMAP;
42194+#endif
42195+
42196+#endif
42197+
42198+ return pax_flags;
42199+}
42200+
42201+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42202+{
42203+
42204+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42205+ unsigned long i;
42206+
42207+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42208+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42209+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42210+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42211+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42212+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42213+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42214+ return ~0UL;
42215+
42216+#ifdef CONFIG_PAX_SOFTMODE
42217+ if (pax_softmode)
42218+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42219+ else
42220+#endif
42221+
42222+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42223+ break;
42224+ }
42225+#endif
42226+
42227+ return ~0UL;
42228+}
42229+
42230+static unsigned long pax_parse_xattr_pax(struct file * const file)
42231+{
42232+
42233+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42234+ ssize_t xattr_size, i;
42235+ unsigned char xattr_value[5];
42236+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42237+
42238+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42239+ if (xattr_size <= 0)
42240+ return ~0UL;
42241+
42242+ for (i = 0; i < xattr_size; i++)
42243+ switch (xattr_value[i]) {
42244+ default:
42245+ return ~0UL;
42246+
42247+#define parse_flag(option1, option2, flag) \
42248+ case option1: \
42249+ pax_flags_hardmode |= MF_PAX_##flag; \
42250+ break; \
42251+ case option2: \
42252+ pax_flags_softmode |= MF_PAX_##flag; \
42253+ break;
42254+
42255+ parse_flag('p', 'P', PAGEEXEC);
42256+ parse_flag('e', 'E', EMUTRAMP);
42257+ parse_flag('m', 'M', MPROTECT);
42258+ parse_flag('r', 'R', RANDMMAP);
42259+ parse_flag('s', 'S', SEGMEXEC);
42260+
42261+#undef parse_flag
42262+ }
42263+
42264+ if (pax_flags_hardmode & pax_flags_softmode)
42265+ return ~0UL;
42266+
42267+#ifdef CONFIG_PAX_SOFTMODE
42268+ if (pax_softmode)
42269+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42270+ else
42271+#endif
42272+
42273+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42274+#else
42275+ return ~0UL;
42276+#endif
42277+
42278+}
42279+
42280+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42281+{
42282+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42283+
42284+ pax_flags = pax_parse_ei_pax(elf_ex);
42285+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42286+ xattr_pax_flags = pax_parse_xattr_pax(file);
42287+
42288+ if (pt_pax_flags == ~0UL)
42289+ pt_pax_flags = xattr_pax_flags;
42290+ else if (xattr_pax_flags == ~0UL)
42291+ xattr_pax_flags = pt_pax_flags;
42292+ if (pt_pax_flags != xattr_pax_flags)
42293+ return -EINVAL;
42294+ if (pt_pax_flags != ~0UL)
42295+ pax_flags = pt_pax_flags;
42296+
42297+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42298+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42299+ if ((__supported_pte_mask & _PAGE_NX))
42300+ pax_flags &= ~MF_PAX_SEGMEXEC;
42301+ else
42302+ pax_flags &= ~MF_PAX_PAGEEXEC;
42303+ }
42304+#endif
42305+
42306+ if (0 > pax_check_flags(&pax_flags))
42307+ return -EINVAL;
42308+
42309+ current->mm->pax_flags = pax_flags;
42310+ return 0;
42311+}
42312+#endif
42313+
42314 /*
42315 * These are the functions used to load ELF style executables and shared
42316 * libraries. There is no binary dependent code anywhere else.
42317@@ -541,6 +870,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42318 {
42319 unsigned int random_variable = 0;
42320
42321+#ifdef CONFIG_PAX_RANDUSTACK
42322+ if (randomize_va_space)
42323+ return stack_top - current->mm->delta_stack;
42324+#endif
42325+
42326 if ((current->flags & PF_RANDOMIZE) &&
42327 !(current->personality & ADDR_NO_RANDOMIZE)) {
42328 random_variable = get_random_int() & STACK_RND_MASK;
42329@@ -559,7 +893,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42330 unsigned long load_addr = 0, load_bias = 0;
42331 int load_addr_set = 0;
42332 char * elf_interpreter = NULL;
42333- unsigned long error;
42334+ unsigned long error = 0;
42335 struct elf_phdr *elf_ppnt, *elf_phdata;
42336 unsigned long elf_bss, elf_brk;
42337 int retval, i;
42338@@ -569,11 +903,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42339 unsigned long start_code, end_code, start_data, end_data;
42340 unsigned long reloc_func_desc __maybe_unused = 0;
42341 int executable_stack = EXSTACK_DEFAULT;
42342- unsigned long def_flags = 0;
42343 struct {
42344 struct elfhdr elf_ex;
42345 struct elfhdr interp_elf_ex;
42346 } *loc;
42347+ unsigned long pax_task_size = TASK_SIZE;
42348
42349 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42350 if (!loc) {
42351@@ -709,11 +1043,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42352 goto out_free_dentry;
42353
42354 /* OK, This is the point of no return */
42355- current->mm->def_flags = def_flags;
42356+
42357+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42358+ current->mm->pax_flags = 0UL;
42359+#endif
42360+
42361+#ifdef CONFIG_PAX_DLRESOLVE
42362+ current->mm->call_dl_resolve = 0UL;
42363+#endif
42364+
42365+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42366+ current->mm->call_syscall = 0UL;
42367+#endif
42368+
42369+#ifdef CONFIG_PAX_ASLR
42370+ current->mm->delta_mmap = 0UL;
42371+ current->mm->delta_stack = 0UL;
42372+#endif
42373+
42374+ current->mm->def_flags = 0;
42375+
42376+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42377+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42378+ send_sig(SIGKILL, current, 0);
42379+ goto out_free_dentry;
42380+ }
42381+#endif
42382+
42383+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42384+ pax_set_initial_flags(bprm);
42385+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42386+ if (pax_set_initial_flags_func)
42387+ (pax_set_initial_flags_func)(bprm);
42388+#endif
42389+
42390+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42391+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42392+ current->mm->context.user_cs_limit = PAGE_SIZE;
42393+ current->mm->def_flags |= VM_PAGEEXEC;
42394+ }
42395+#endif
42396+
42397+#ifdef CONFIG_PAX_SEGMEXEC
42398+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42399+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42400+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42401+ pax_task_size = SEGMEXEC_TASK_SIZE;
42402+ current->mm->def_flags |= VM_NOHUGEPAGE;
42403+ }
42404+#endif
42405+
42406+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42407+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42408+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42409+ put_cpu();
42410+ }
42411+#endif
42412
42413 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42414 may depend on the personality. */
42415 SET_PERSONALITY(loc->elf_ex);
42416+
42417+#ifdef CONFIG_PAX_ASLR
42418+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42419+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42420+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42421+ }
42422+#endif
42423+
42424+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42425+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42426+ executable_stack = EXSTACK_DISABLE_X;
42427+ current->personality &= ~READ_IMPLIES_EXEC;
42428+ } else
42429+#endif
42430+
42431 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42432 current->personality |= READ_IMPLIES_EXEC;
42433
42434@@ -804,6 +1208,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42435 #else
42436 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42437 #endif
42438+
42439+#ifdef CONFIG_PAX_RANDMMAP
42440+ /* PaX: randomize base address at the default exe base if requested */
42441+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42442+#ifdef CONFIG_SPARC64
42443+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42444+#else
42445+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42446+#endif
42447+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42448+ elf_flags |= MAP_FIXED;
42449+ }
42450+#endif
42451+
42452 }
42453
42454 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42455@@ -836,9 +1254,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42456 * allowed task size. Note that p_filesz must always be
42457 * <= p_memsz so it is only necessary to check p_memsz.
42458 */
42459- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42460- elf_ppnt->p_memsz > TASK_SIZE ||
42461- TASK_SIZE - elf_ppnt->p_memsz < k) {
42462+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42463+ elf_ppnt->p_memsz > pax_task_size ||
42464+ pax_task_size - elf_ppnt->p_memsz < k) {
42465 /* set_brk can never work. Avoid overflows. */
42466 send_sig(SIGKILL, current, 0);
42467 retval = -EINVAL;
42468@@ -877,11 +1295,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42469 goto out_free_dentry;
42470 }
42471 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42472- send_sig(SIGSEGV, current, 0);
42473- retval = -EFAULT; /* Nobody gets to see this, but.. */
42474- goto out_free_dentry;
42475+ /*
42476+ * This bss-zeroing can fail if the ELF
42477+ * file specifies odd protections. So
42478+ * we don't check the return value
42479+ */
42480 }
42481
42482+#ifdef CONFIG_PAX_RANDMMAP
42483+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42484+ unsigned long start, size;
42485+
42486+ start = ELF_PAGEALIGN(elf_brk);
42487+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42488+ down_write(&current->mm->mmap_sem);
42489+ retval = -ENOMEM;
42490+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42491+ unsigned long prot = PROT_NONE;
42492+
42493+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42494+// if (current->personality & ADDR_NO_RANDOMIZE)
42495+// prot = PROT_READ;
42496+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42497+ retval = IS_ERR_VALUE(start) ? start : 0;
42498+ }
42499+ up_write(&current->mm->mmap_sem);
42500+ if (retval == 0)
42501+ retval = set_brk(start + size, start + size + PAGE_SIZE);
42502+ if (retval < 0) {
42503+ send_sig(SIGKILL, current, 0);
42504+ goto out_free_dentry;
42505+ }
42506+ }
42507+#endif
42508+
42509 if (elf_interpreter) {
42510 unsigned long uninitialized_var(interp_map_addr);
42511
42512@@ -1109,7 +1556,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42513 * Decide what to dump of a segment, part, all or none.
42514 */
42515 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42516- unsigned long mm_flags)
42517+ unsigned long mm_flags, long signr)
42518 {
42519 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42520
42521@@ -1146,7 +1593,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42522 if (vma->vm_file == NULL)
42523 return 0;
42524
42525- if (FILTER(MAPPED_PRIVATE))
42526+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42527 goto whole;
42528
42529 /*
42530@@ -1368,9 +1815,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42531 {
42532 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42533 int i = 0;
42534- do
42535+ do {
42536 i += 2;
42537- while (auxv[i - 2] != AT_NULL);
42538+ } while (auxv[i - 2] != AT_NULL);
42539 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42540 }
42541
42542@@ -1892,14 +2339,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42543 }
42544
42545 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42546- unsigned long mm_flags)
42547+ struct coredump_params *cprm)
42548 {
42549 struct vm_area_struct *vma;
42550 size_t size = 0;
42551
42552 for (vma = first_vma(current, gate_vma); vma != NULL;
42553 vma = next_vma(vma, gate_vma))
42554- size += vma_dump_size(vma, mm_flags);
42555+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42556 return size;
42557 }
42558
42559@@ -1993,7 +2440,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42560
42561 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42562
42563- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42564+ offset += elf_core_vma_data_size(gate_vma, cprm);
42565 offset += elf_core_extra_data_size();
42566 e_shoff = offset;
42567
42568@@ -2007,10 +2454,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42569 offset = dataoff;
42570
42571 size += sizeof(*elf);
42572+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42573 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42574 goto end_coredump;
42575
42576 size += sizeof(*phdr4note);
42577+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42578 if (size > cprm->limit
42579 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42580 goto end_coredump;
42581@@ -2024,7 +2473,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42582 phdr.p_offset = offset;
42583 phdr.p_vaddr = vma->vm_start;
42584 phdr.p_paddr = 0;
42585- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42586+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42587 phdr.p_memsz = vma->vm_end - vma->vm_start;
42588 offset += phdr.p_filesz;
42589 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42590@@ -2035,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42591 phdr.p_align = ELF_EXEC_PAGESIZE;
42592
42593 size += sizeof(phdr);
42594+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42595 if (size > cprm->limit
42596 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42597 goto end_coredump;
42598@@ -2059,7 +2509,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42599 unsigned long addr;
42600 unsigned long end;
42601
42602- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42603+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42604
42605 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42606 struct page *page;
42607@@ -2068,6 +2518,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42608 page = get_dump_page(addr);
42609 if (page) {
42610 void *kaddr = kmap(page);
42611+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42612 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42613 !dump_write(cprm->file, kaddr,
42614 PAGE_SIZE);
42615@@ -2085,6 +2536,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42616
42617 if (e_phnum == PN_XNUM) {
42618 size += sizeof(*shdr4extnum);
42619+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42620 if (size > cprm->limit
42621 || !dump_write(cprm->file, shdr4extnum,
42622 sizeof(*shdr4extnum)))
42623@@ -2105,6 +2557,97 @@ out:
42624
42625 #endif /* CONFIG_ELF_CORE */
42626
42627+#ifdef CONFIG_PAX_MPROTECT
42628+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42629+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42630+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42631+ *
42632+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42633+ * basis because we want to allow the common case and not the special ones.
42634+ */
42635+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42636+{
42637+ struct elfhdr elf_h;
42638+ struct elf_phdr elf_p;
42639+ unsigned long i;
42640+ unsigned long oldflags;
42641+ bool is_textrel_rw, is_textrel_rx, is_relro;
42642+
42643+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42644+ return;
42645+
42646+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42647+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42648+
42649+#ifdef CONFIG_PAX_ELFRELOCS
42650+ /* possible TEXTREL */
42651+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42652+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42653+#else
42654+ is_textrel_rw = false;
42655+ is_textrel_rx = false;
42656+#endif
42657+
42658+ /* possible RELRO */
42659+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42660+
42661+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42662+ return;
42663+
42664+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42665+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42666+
42667+#ifdef CONFIG_PAX_ETEXECRELOCS
42668+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42669+#else
42670+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42671+#endif
42672+
42673+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42674+ !elf_check_arch(&elf_h) ||
42675+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42676+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42677+ return;
42678+
42679+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42680+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42681+ return;
42682+ switch (elf_p.p_type) {
42683+ case PT_DYNAMIC:
42684+ if (!is_textrel_rw && !is_textrel_rx)
42685+ continue;
42686+ i = 0UL;
42687+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42688+ elf_dyn dyn;
42689+
42690+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42691+ return;
42692+ if (dyn.d_tag == DT_NULL)
42693+ return;
42694+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42695+ gr_log_textrel(vma);
42696+ if (is_textrel_rw)
42697+ vma->vm_flags |= VM_MAYWRITE;
42698+ else
42699+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42700+ vma->vm_flags &= ~VM_MAYWRITE;
42701+ return;
42702+ }
42703+ i++;
42704+ }
42705+ return;
42706+
42707+ case PT_GNU_RELRO:
42708+ if (!is_relro)
42709+ continue;
42710+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42711+ vma->vm_flags &= ~VM_MAYWRITE;
42712+ return;
42713+ }
42714+ }
42715+}
42716+#endif
42717+
42718 static int __init init_elf_binfmt(void)
42719 {
42720 register_binfmt(&elf_format);
42721diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42722index 6b2daf9..a70dccb 100644
42723--- a/fs/binfmt_flat.c
42724+++ b/fs/binfmt_flat.c
42725@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42726 realdatastart = (unsigned long) -ENOMEM;
42727 printk("Unable to allocate RAM for process data, errno %d\n",
42728 (int)-realdatastart);
42729+ down_write(&current->mm->mmap_sem);
42730 do_munmap(current->mm, textpos, text_len);
42731+ up_write(&current->mm->mmap_sem);
42732 ret = realdatastart;
42733 goto err;
42734 }
42735@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42736 }
42737 if (IS_ERR_VALUE(result)) {
42738 printk("Unable to read data+bss, errno %d\n", (int)-result);
42739+ down_write(&current->mm->mmap_sem);
42740 do_munmap(current->mm, textpos, text_len);
42741 do_munmap(current->mm, realdatastart, len);
42742+ up_write(&current->mm->mmap_sem);
42743 ret = result;
42744 goto err;
42745 }
42746@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42747 }
42748 if (IS_ERR_VALUE(result)) {
42749 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42750+ down_write(&current->mm->mmap_sem);
42751 do_munmap(current->mm, textpos, text_len + data_len + extra +
42752 MAX_SHARED_LIBS * sizeof(unsigned long));
42753+ up_write(&current->mm->mmap_sem);
42754 ret = result;
42755 goto err;
42756 }
42757diff --git a/fs/bio.c b/fs/bio.c
42758index 84da885..bac1d48 100644
42759--- a/fs/bio.c
42760+++ b/fs/bio.c
42761@@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42762 /*
42763 * Overflow, abort
42764 */
42765- if (end < start)
42766+ if (end < start || end - start > INT_MAX - nr_pages)
42767 return ERR_PTR(-EINVAL);
42768
42769 nr_pages += end - start;
42770@@ -972,7 +972,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
42771 /*
42772 * Overflow, abort
42773 */
42774- if (end < start)
42775+ if (end < start || end - start > INT_MAX - nr_pages)
42776 return ERR_PTR(-EINVAL);
42777
42778 nr_pages += end - start;
42779@@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42780 const int read = bio_data_dir(bio) == READ;
42781 struct bio_map_data *bmd = bio->bi_private;
42782 int i;
42783- char *p = bmd->sgvecs[0].iov_base;
42784+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42785
42786 __bio_for_each_segment(bvec, bio, i, 0) {
42787 char *addr = page_address(bvec->bv_page);
42788diff --git a/fs/block_dev.c b/fs/block_dev.c
42789index ba11c30..623d736 100644
42790--- a/fs/block_dev.c
42791+++ b/fs/block_dev.c
42792@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42793 else if (bdev->bd_contains == bdev)
42794 return true; /* is a whole device which isn't held */
42795
42796- else if (whole->bd_holder == bd_may_claim)
42797+ else if (whole->bd_holder == (void *)bd_may_claim)
42798 return true; /* is a partition of a device that is being partitioned */
42799 else if (whole->bd_holder != NULL)
42800 return false; /* is a partition of a held device */
42801diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42802index c053e90..e5f1afc 100644
42803--- a/fs/btrfs/check-integrity.c
42804+++ b/fs/btrfs/check-integrity.c
42805@@ -156,7 +156,7 @@ struct btrfsic_block {
42806 union {
42807 bio_end_io_t *bio;
42808 bh_end_io_t *bh;
42809- } orig_bio_bh_end_io;
42810+ } __no_const orig_bio_bh_end_io;
42811 int submit_bio_bh_rw;
42812 u64 flush_gen; /* only valid if !never_written */
42813 };
42814diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42815index 4106264..8157ede 100644
42816--- a/fs/btrfs/ctree.c
42817+++ b/fs/btrfs/ctree.c
42818@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42819 free_extent_buffer(buf);
42820 add_root_to_dirty_list(root);
42821 } else {
42822- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42823- parent_start = parent->start;
42824- else
42825+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42826+ if (parent)
42827+ parent_start = parent->start;
42828+ else
42829+ parent_start = 0;
42830+ } else
42831 parent_start = 0;
42832
42833 WARN_ON(trans->transid != btrfs_header_generation(parent));
42834diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42835index 0df0d1f..4bdcbfe 100644
42836--- a/fs/btrfs/inode.c
42837+++ b/fs/btrfs/inode.c
42838@@ -7074,7 +7074,7 @@ fail:
42839 return -ENOMEM;
42840 }
42841
42842-static int btrfs_getattr(struct vfsmount *mnt,
42843+int btrfs_getattr(struct vfsmount *mnt,
42844 struct dentry *dentry, struct kstat *stat)
42845 {
42846 struct inode *inode = dentry->d_inode;
42847@@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42848 return 0;
42849 }
42850
42851+EXPORT_SYMBOL(btrfs_getattr);
42852+
42853+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42854+{
42855+ return BTRFS_I(inode)->root->anon_dev;
42856+}
42857+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42858+
42859 /*
42860 * If a file is moved, it will inherit the cow and compression flags of the new
42861 * directory.
42862diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42863index 14f8e1f..ab8d81f 100644
42864--- a/fs/btrfs/ioctl.c
42865+++ b/fs/btrfs/ioctl.c
42866@@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42867 for (i = 0; i < num_types; i++) {
42868 struct btrfs_space_info *tmp;
42869
42870+ /* Don't copy in more than we allocated */
42871 if (!slot_count)
42872 break;
42873
42874+ slot_count--;
42875+
42876 info = NULL;
42877 rcu_read_lock();
42878 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42879@@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42880 memcpy(dest, &space, sizeof(space));
42881 dest++;
42882 space_args.total_spaces++;
42883- slot_count--;
42884 }
42885- if (!slot_count)
42886- break;
42887 }
42888 up_read(&info->groups_sem);
42889 }
42890
42891- user_dest = (struct btrfs_ioctl_space_info *)
42892+ user_dest = (struct btrfs_ioctl_space_info __user *)
42893 (arg + sizeof(struct btrfs_ioctl_space_args));
42894
42895 if (copy_to_user(user_dest, dest_orig, alloc_size))
42896diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42897index 646ee21..f020f87 100644
42898--- a/fs/btrfs/relocation.c
42899+++ b/fs/btrfs/relocation.c
42900@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42901 }
42902 spin_unlock(&rc->reloc_root_tree.lock);
42903
42904- BUG_ON((struct btrfs_root *)node->data != root);
42905+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42906
42907 if (!del) {
42908 spin_lock(&rc->reloc_root_tree.lock);
42909diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42910index 622f469..e8d2d55 100644
42911--- a/fs/cachefiles/bind.c
42912+++ b/fs/cachefiles/bind.c
42913@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42914 args);
42915
42916 /* start by checking things over */
42917- ASSERT(cache->fstop_percent >= 0 &&
42918- cache->fstop_percent < cache->fcull_percent &&
42919+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42920 cache->fcull_percent < cache->frun_percent &&
42921 cache->frun_percent < 100);
42922
42923- ASSERT(cache->bstop_percent >= 0 &&
42924- cache->bstop_percent < cache->bcull_percent &&
42925+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42926 cache->bcull_percent < cache->brun_percent &&
42927 cache->brun_percent < 100);
42928
42929diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42930index 0a1467b..6a53245 100644
42931--- a/fs/cachefiles/daemon.c
42932+++ b/fs/cachefiles/daemon.c
42933@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42934 if (n > buflen)
42935 return -EMSGSIZE;
42936
42937- if (copy_to_user(_buffer, buffer, n) != 0)
42938+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42939 return -EFAULT;
42940
42941 return n;
42942@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42943 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42944 return -EIO;
42945
42946- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42947+ if (datalen > PAGE_SIZE - 1)
42948 return -EOPNOTSUPP;
42949
42950 /* drag the command string into the kernel so we can parse it */
42951@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42952 if (args[0] != '%' || args[1] != '\0')
42953 return -EINVAL;
42954
42955- if (fstop < 0 || fstop >= cache->fcull_percent)
42956+ if (fstop >= cache->fcull_percent)
42957 return cachefiles_daemon_range_error(cache, args);
42958
42959 cache->fstop_percent = fstop;
42960@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42961 if (args[0] != '%' || args[1] != '\0')
42962 return -EINVAL;
42963
42964- if (bstop < 0 || bstop >= cache->bcull_percent)
42965+ if (bstop >= cache->bcull_percent)
42966 return cachefiles_daemon_range_error(cache, args);
42967
42968 cache->bstop_percent = bstop;
42969diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42970index bd6bc1b..b627b53 100644
42971--- a/fs/cachefiles/internal.h
42972+++ b/fs/cachefiles/internal.h
42973@@ -57,7 +57,7 @@ struct cachefiles_cache {
42974 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42975 struct rb_root active_nodes; /* active nodes (can't be culled) */
42976 rwlock_t active_lock; /* lock for active_nodes */
42977- atomic_t gravecounter; /* graveyard uniquifier */
42978+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42979 unsigned frun_percent; /* when to stop culling (% files) */
42980 unsigned fcull_percent; /* when to start culling (% files) */
42981 unsigned fstop_percent; /* when to stop allocating (% files) */
42982@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42983 * proc.c
42984 */
42985 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42986-extern atomic_t cachefiles_lookup_histogram[HZ];
42987-extern atomic_t cachefiles_mkdir_histogram[HZ];
42988-extern atomic_t cachefiles_create_histogram[HZ];
42989+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42990+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42991+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42992
42993 extern int __init cachefiles_proc_init(void);
42994 extern void cachefiles_proc_cleanup(void);
42995 static inline
42996-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42997+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42998 {
42999 unsigned long jif = jiffies - start_jif;
43000 if (jif >= HZ)
43001 jif = HZ - 1;
43002- atomic_inc(&histogram[jif]);
43003+ atomic_inc_unchecked(&histogram[jif]);
43004 }
43005
43006 #else
43007diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43008index 7f0771d..87d4f36 100644
43009--- a/fs/cachefiles/namei.c
43010+++ b/fs/cachefiles/namei.c
43011@@ -318,7 +318,7 @@ try_again:
43012 /* first step is to make up a grave dentry in the graveyard */
43013 sprintf(nbuffer, "%08x%08x",
43014 (uint32_t) get_seconds(),
43015- (uint32_t) atomic_inc_return(&cache->gravecounter));
43016+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43017
43018 /* do the multiway lock magic */
43019 trap = lock_rename(cache->graveyard, dir);
43020diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43021index eccd339..4c1d995 100644
43022--- a/fs/cachefiles/proc.c
43023+++ b/fs/cachefiles/proc.c
43024@@ -14,9 +14,9 @@
43025 #include <linux/seq_file.h>
43026 #include "internal.h"
43027
43028-atomic_t cachefiles_lookup_histogram[HZ];
43029-atomic_t cachefiles_mkdir_histogram[HZ];
43030-atomic_t cachefiles_create_histogram[HZ];
43031+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43032+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43033+atomic_unchecked_t cachefiles_create_histogram[HZ];
43034
43035 /*
43036 * display the latency histogram
43037@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43038 return 0;
43039 default:
43040 index = (unsigned long) v - 3;
43041- x = atomic_read(&cachefiles_lookup_histogram[index]);
43042- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43043- z = atomic_read(&cachefiles_create_histogram[index]);
43044+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43045+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43046+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43047 if (x == 0 && y == 0 && z == 0)
43048 return 0;
43049
43050diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43051index 0e3c092..818480e 100644
43052--- a/fs/cachefiles/rdwr.c
43053+++ b/fs/cachefiles/rdwr.c
43054@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43055 old_fs = get_fs();
43056 set_fs(KERNEL_DS);
43057 ret = file->f_op->write(
43058- file, (const void __user *) data, len, &pos);
43059+ file, (const void __force_user *) data, len, &pos);
43060 set_fs(old_fs);
43061 kunmap(page);
43062 if (ret != len)
43063diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43064index 3e8094b..cb3ff3d 100644
43065--- a/fs/ceph/dir.c
43066+++ b/fs/ceph/dir.c
43067@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43068 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43069 struct ceph_mds_client *mdsc = fsc->mdsc;
43070 unsigned frag = fpos_frag(filp->f_pos);
43071- int off = fpos_off(filp->f_pos);
43072+ unsigned int off = fpos_off(filp->f_pos);
43073 int err;
43074 u32 ftype;
43075 struct ceph_mds_reply_info_parsed *rinfo;
43076@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43077 if (nd &&
43078 (nd->flags & LOOKUP_OPEN) &&
43079 !(nd->intent.open.flags & O_CREAT)) {
43080- int mode = nd->intent.open.create_mode & ~current->fs->umask;
43081+ int mode = nd->intent.open.create_mode & ~current_umask();
43082 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43083 }
43084
43085diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43086index 2704646..c581c91 100644
43087--- a/fs/cifs/cifs_debug.c
43088+++ b/fs/cifs/cifs_debug.c
43089@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43090
43091 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43092 #ifdef CONFIG_CIFS_STATS2
43093- atomic_set(&totBufAllocCount, 0);
43094- atomic_set(&totSmBufAllocCount, 0);
43095+ atomic_set_unchecked(&totBufAllocCount, 0);
43096+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43097 #endif /* CONFIG_CIFS_STATS2 */
43098 spin_lock(&cifs_tcp_ses_lock);
43099 list_for_each(tmp1, &cifs_tcp_ses_list) {
43100@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43101 tcon = list_entry(tmp3,
43102 struct cifs_tcon,
43103 tcon_list);
43104- atomic_set(&tcon->num_smbs_sent, 0);
43105- atomic_set(&tcon->num_writes, 0);
43106- atomic_set(&tcon->num_reads, 0);
43107- atomic_set(&tcon->num_oplock_brks, 0);
43108- atomic_set(&tcon->num_opens, 0);
43109- atomic_set(&tcon->num_posixopens, 0);
43110- atomic_set(&tcon->num_posixmkdirs, 0);
43111- atomic_set(&tcon->num_closes, 0);
43112- atomic_set(&tcon->num_deletes, 0);
43113- atomic_set(&tcon->num_mkdirs, 0);
43114- atomic_set(&tcon->num_rmdirs, 0);
43115- atomic_set(&tcon->num_renames, 0);
43116- atomic_set(&tcon->num_t2renames, 0);
43117- atomic_set(&tcon->num_ffirst, 0);
43118- atomic_set(&tcon->num_fnext, 0);
43119- atomic_set(&tcon->num_fclose, 0);
43120- atomic_set(&tcon->num_hardlinks, 0);
43121- atomic_set(&tcon->num_symlinks, 0);
43122- atomic_set(&tcon->num_locks, 0);
43123+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43124+ atomic_set_unchecked(&tcon->num_writes, 0);
43125+ atomic_set_unchecked(&tcon->num_reads, 0);
43126+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43127+ atomic_set_unchecked(&tcon->num_opens, 0);
43128+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43129+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43130+ atomic_set_unchecked(&tcon->num_closes, 0);
43131+ atomic_set_unchecked(&tcon->num_deletes, 0);
43132+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43133+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43134+ atomic_set_unchecked(&tcon->num_renames, 0);
43135+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43136+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43137+ atomic_set_unchecked(&tcon->num_fnext, 0);
43138+ atomic_set_unchecked(&tcon->num_fclose, 0);
43139+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43140+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43141+ atomic_set_unchecked(&tcon->num_locks, 0);
43142 }
43143 }
43144 }
43145@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43146 smBufAllocCount.counter, cifs_min_small);
43147 #ifdef CONFIG_CIFS_STATS2
43148 seq_printf(m, "Total Large %d Small %d Allocations\n",
43149- atomic_read(&totBufAllocCount),
43150- atomic_read(&totSmBufAllocCount));
43151+ atomic_read_unchecked(&totBufAllocCount),
43152+ atomic_read_unchecked(&totSmBufAllocCount));
43153 #endif /* CONFIG_CIFS_STATS2 */
43154
43155 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43156@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43157 if (tcon->need_reconnect)
43158 seq_puts(m, "\tDISCONNECTED ");
43159 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43160- atomic_read(&tcon->num_smbs_sent),
43161- atomic_read(&tcon->num_oplock_brks));
43162+ atomic_read_unchecked(&tcon->num_smbs_sent),
43163+ atomic_read_unchecked(&tcon->num_oplock_brks));
43164 seq_printf(m, "\nReads: %d Bytes: %lld",
43165- atomic_read(&tcon->num_reads),
43166+ atomic_read_unchecked(&tcon->num_reads),
43167 (long long)(tcon->bytes_read));
43168 seq_printf(m, "\nWrites: %d Bytes: %lld",
43169- atomic_read(&tcon->num_writes),
43170+ atomic_read_unchecked(&tcon->num_writes),
43171 (long long)(tcon->bytes_written));
43172 seq_printf(m, "\nFlushes: %d",
43173- atomic_read(&tcon->num_flushes));
43174+ atomic_read_unchecked(&tcon->num_flushes));
43175 seq_printf(m, "\nLocks: %d HardLinks: %d "
43176 "Symlinks: %d",
43177- atomic_read(&tcon->num_locks),
43178- atomic_read(&tcon->num_hardlinks),
43179- atomic_read(&tcon->num_symlinks));
43180+ atomic_read_unchecked(&tcon->num_locks),
43181+ atomic_read_unchecked(&tcon->num_hardlinks),
43182+ atomic_read_unchecked(&tcon->num_symlinks));
43183 seq_printf(m, "\nOpens: %d Closes: %d "
43184 "Deletes: %d",
43185- atomic_read(&tcon->num_opens),
43186- atomic_read(&tcon->num_closes),
43187- atomic_read(&tcon->num_deletes));
43188+ atomic_read_unchecked(&tcon->num_opens),
43189+ atomic_read_unchecked(&tcon->num_closes),
43190+ atomic_read_unchecked(&tcon->num_deletes));
43191 seq_printf(m, "\nPosix Opens: %d "
43192 "Posix Mkdirs: %d",
43193- atomic_read(&tcon->num_posixopens),
43194- atomic_read(&tcon->num_posixmkdirs));
43195+ atomic_read_unchecked(&tcon->num_posixopens),
43196+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43197 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43198- atomic_read(&tcon->num_mkdirs),
43199- atomic_read(&tcon->num_rmdirs));
43200+ atomic_read_unchecked(&tcon->num_mkdirs),
43201+ atomic_read_unchecked(&tcon->num_rmdirs));
43202 seq_printf(m, "\nRenames: %d T2 Renames %d",
43203- atomic_read(&tcon->num_renames),
43204- atomic_read(&tcon->num_t2renames));
43205+ atomic_read_unchecked(&tcon->num_renames),
43206+ atomic_read_unchecked(&tcon->num_t2renames));
43207 seq_printf(m, "\nFindFirst: %d FNext %d "
43208 "FClose %d",
43209- atomic_read(&tcon->num_ffirst),
43210- atomic_read(&tcon->num_fnext),
43211- atomic_read(&tcon->num_fclose));
43212+ atomic_read_unchecked(&tcon->num_ffirst),
43213+ atomic_read_unchecked(&tcon->num_fnext),
43214+ atomic_read_unchecked(&tcon->num_fclose));
43215 }
43216 }
43217 }
43218diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43219index 541ef81..a78deb8 100644
43220--- a/fs/cifs/cifsfs.c
43221+++ b/fs/cifs/cifsfs.c
43222@@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
43223 cifs_req_cachep = kmem_cache_create("cifs_request",
43224 CIFSMaxBufSize +
43225 MAX_CIFS_HDR_SIZE, 0,
43226- SLAB_HWCACHE_ALIGN, NULL);
43227+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43228 if (cifs_req_cachep == NULL)
43229 return -ENOMEM;
43230
43231@@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
43232 efficient to alloc 1 per page off the slab compared to 17K (5page)
43233 alloc of large cifs buffers even when page debugging is on */
43234 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43235- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43236+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43237 NULL);
43238 if (cifs_sm_req_cachep == NULL) {
43239 mempool_destroy(cifs_req_poolp);
43240@@ -1097,8 +1097,8 @@ init_cifs(void)
43241 atomic_set(&bufAllocCount, 0);
43242 atomic_set(&smBufAllocCount, 0);
43243 #ifdef CONFIG_CIFS_STATS2
43244- atomic_set(&totBufAllocCount, 0);
43245- atomic_set(&totSmBufAllocCount, 0);
43246+ atomic_set_unchecked(&totBufAllocCount, 0);
43247+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43248 #endif /* CONFIG_CIFS_STATS2 */
43249
43250 atomic_set(&midCount, 0);
43251diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43252index 73fea28..b996b84 100644
43253--- a/fs/cifs/cifsglob.h
43254+++ b/fs/cifs/cifsglob.h
43255@@ -439,28 +439,28 @@ struct cifs_tcon {
43256 __u16 Flags; /* optional support bits */
43257 enum statusEnum tidStatus;
43258 #ifdef CONFIG_CIFS_STATS
43259- atomic_t num_smbs_sent;
43260- atomic_t num_writes;
43261- atomic_t num_reads;
43262- atomic_t num_flushes;
43263- atomic_t num_oplock_brks;
43264- atomic_t num_opens;
43265- atomic_t num_closes;
43266- atomic_t num_deletes;
43267- atomic_t num_mkdirs;
43268- atomic_t num_posixopens;
43269- atomic_t num_posixmkdirs;
43270- atomic_t num_rmdirs;
43271- atomic_t num_renames;
43272- atomic_t num_t2renames;
43273- atomic_t num_ffirst;
43274- atomic_t num_fnext;
43275- atomic_t num_fclose;
43276- atomic_t num_hardlinks;
43277- atomic_t num_symlinks;
43278- atomic_t num_locks;
43279- atomic_t num_acl_get;
43280- atomic_t num_acl_set;
43281+ atomic_unchecked_t num_smbs_sent;
43282+ atomic_unchecked_t num_writes;
43283+ atomic_unchecked_t num_reads;
43284+ atomic_unchecked_t num_flushes;
43285+ atomic_unchecked_t num_oplock_brks;
43286+ atomic_unchecked_t num_opens;
43287+ atomic_unchecked_t num_closes;
43288+ atomic_unchecked_t num_deletes;
43289+ atomic_unchecked_t num_mkdirs;
43290+ atomic_unchecked_t num_posixopens;
43291+ atomic_unchecked_t num_posixmkdirs;
43292+ atomic_unchecked_t num_rmdirs;
43293+ atomic_unchecked_t num_renames;
43294+ atomic_unchecked_t num_t2renames;
43295+ atomic_unchecked_t num_ffirst;
43296+ atomic_unchecked_t num_fnext;
43297+ atomic_unchecked_t num_fclose;
43298+ atomic_unchecked_t num_hardlinks;
43299+ atomic_unchecked_t num_symlinks;
43300+ atomic_unchecked_t num_locks;
43301+ atomic_unchecked_t num_acl_get;
43302+ atomic_unchecked_t num_acl_set;
43303 #ifdef CONFIG_CIFS_STATS2
43304 unsigned long long time_writes;
43305 unsigned long long time_reads;
43306@@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
43307 }
43308
43309 #ifdef CONFIG_CIFS_STATS
43310-#define cifs_stats_inc atomic_inc
43311+#define cifs_stats_inc atomic_inc_unchecked
43312
43313 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43314 unsigned int bytes)
43315@@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43316 /* Various Debug counters */
43317 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43318 #ifdef CONFIG_CIFS_STATS2
43319-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43320-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43321+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43322+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43323 #endif
43324 GLOBAL_EXTERN atomic_t smBufAllocCount;
43325 GLOBAL_EXTERN atomic_t midCount;
43326diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43327index 6b0e064..94e6c3c 100644
43328--- a/fs/cifs/link.c
43329+++ b/fs/cifs/link.c
43330@@ -600,7 +600,7 @@ symlink_exit:
43331
43332 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43333 {
43334- char *p = nd_get_link(nd);
43335+ const char *p = nd_get_link(nd);
43336 if (!IS_ERR(p))
43337 kfree(p);
43338 }
43339diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43340index c29d1aa..58018da 100644
43341--- a/fs/cifs/misc.c
43342+++ b/fs/cifs/misc.c
43343@@ -156,7 +156,7 @@ cifs_buf_get(void)
43344 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43345 atomic_inc(&bufAllocCount);
43346 #ifdef CONFIG_CIFS_STATS2
43347- atomic_inc(&totBufAllocCount);
43348+ atomic_inc_unchecked(&totBufAllocCount);
43349 #endif /* CONFIG_CIFS_STATS2 */
43350 }
43351
43352@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43353 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43354 atomic_inc(&smBufAllocCount);
43355 #ifdef CONFIG_CIFS_STATS2
43356- atomic_inc(&totSmBufAllocCount);
43357+ atomic_inc_unchecked(&totSmBufAllocCount);
43358 #endif /* CONFIG_CIFS_STATS2 */
43359
43360 }
43361diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43362index 6901578..d402eb5 100644
43363--- a/fs/coda/cache.c
43364+++ b/fs/coda/cache.c
43365@@ -24,7 +24,7 @@
43366 #include "coda_linux.h"
43367 #include "coda_cache.h"
43368
43369-static atomic_t permission_epoch = ATOMIC_INIT(0);
43370+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43371
43372 /* replace or extend an acl cache hit */
43373 void coda_cache_enter(struct inode *inode, int mask)
43374@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43375 struct coda_inode_info *cii = ITOC(inode);
43376
43377 spin_lock(&cii->c_lock);
43378- cii->c_cached_epoch = atomic_read(&permission_epoch);
43379+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43380 if (cii->c_uid != current_fsuid()) {
43381 cii->c_uid = current_fsuid();
43382 cii->c_cached_perm = mask;
43383@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43384 {
43385 struct coda_inode_info *cii = ITOC(inode);
43386 spin_lock(&cii->c_lock);
43387- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43388+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43389 spin_unlock(&cii->c_lock);
43390 }
43391
43392 /* remove all acl caches */
43393 void coda_cache_clear_all(struct super_block *sb)
43394 {
43395- atomic_inc(&permission_epoch);
43396+ atomic_inc_unchecked(&permission_epoch);
43397 }
43398
43399
43400@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43401 spin_lock(&cii->c_lock);
43402 hit = (mask & cii->c_cached_perm) == mask &&
43403 cii->c_uid == current_fsuid() &&
43404- cii->c_cached_epoch == atomic_read(&permission_epoch);
43405+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43406 spin_unlock(&cii->c_lock);
43407
43408 return hit;
43409diff --git a/fs/compat.c b/fs/compat.c
43410index f2944ac..62845d2 100644
43411--- a/fs/compat.c
43412+++ b/fs/compat.c
43413@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43414
43415 set_fs(KERNEL_DS);
43416 /* The __user pointer cast is valid because of the set_fs() */
43417- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43418+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43419 set_fs(oldfs);
43420 /* truncating is ok because it's a user address */
43421 if (!ret)
43422@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43423 goto out;
43424
43425 ret = -EINVAL;
43426- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43427+ if (nr_segs > UIO_MAXIOV)
43428 goto out;
43429 if (nr_segs > fast_segs) {
43430 ret = -ENOMEM;
43431@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43432
43433 struct compat_readdir_callback {
43434 struct compat_old_linux_dirent __user *dirent;
43435+ struct file * file;
43436 int result;
43437 };
43438
43439@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43440 buf->result = -EOVERFLOW;
43441 return -EOVERFLOW;
43442 }
43443+
43444+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43445+ return 0;
43446+
43447 buf->result++;
43448 dirent = buf->dirent;
43449 if (!access_ok(VERIFY_WRITE, dirent,
43450@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43451
43452 buf.result = 0;
43453 buf.dirent = dirent;
43454+ buf.file = file;
43455
43456 error = vfs_readdir(file, compat_fillonedir, &buf);
43457 if (buf.result)
43458@@ -900,6 +906,7 @@ struct compat_linux_dirent {
43459 struct compat_getdents_callback {
43460 struct compat_linux_dirent __user *current_dir;
43461 struct compat_linux_dirent __user *previous;
43462+ struct file * file;
43463 int count;
43464 int error;
43465 };
43466@@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43467 buf->error = -EOVERFLOW;
43468 return -EOVERFLOW;
43469 }
43470+
43471+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43472+ return 0;
43473+
43474 dirent = buf->previous;
43475 if (dirent) {
43476 if (__put_user(offset, &dirent->d_off))
43477@@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43478 buf.previous = NULL;
43479 buf.count = count;
43480 buf.error = 0;
43481+ buf.file = file;
43482
43483 error = vfs_readdir(file, compat_filldir, &buf);
43484 if (error >= 0)
43485@@ -989,6 +1001,7 @@ out:
43486 struct compat_getdents_callback64 {
43487 struct linux_dirent64 __user *current_dir;
43488 struct linux_dirent64 __user *previous;
43489+ struct file * file;
43490 int count;
43491 int error;
43492 };
43493@@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43494 buf->error = -EINVAL; /* only used if we fail.. */
43495 if (reclen > buf->count)
43496 return -EINVAL;
43497+
43498+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43499+ return 0;
43500+
43501 dirent = buf->previous;
43502
43503 if (dirent) {
43504@@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43505 buf.previous = NULL;
43506 buf.count = count;
43507 buf.error = 0;
43508+ buf.file = file;
43509
43510 error = vfs_readdir(file, compat_filldir64, &buf);
43511 if (error >= 0)
43512 error = buf.error;
43513 lastdirent = buf.previous;
43514 if (lastdirent) {
43515- typeof(lastdirent->d_off) d_off = file->f_pos;
43516+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43517 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43518 error = -EFAULT;
43519 else
43520diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43521index 112e45a..b59845b 100644
43522--- a/fs/compat_binfmt_elf.c
43523+++ b/fs/compat_binfmt_elf.c
43524@@ -30,11 +30,13 @@
43525 #undef elf_phdr
43526 #undef elf_shdr
43527 #undef elf_note
43528+#undef elf_dyn
43529 #undef elf_addr_t
43530 #define elfhdr elf32_hdr
43531 #define elf_phdr elf32_phdr
43532 #define elf_shdr elf32_shdr
43533 #define elf_note elf32_note
43534+#define elf_dyn Elf32_Dyn
43535 #define elf_addr_t Elf32_Addr
43536
43537 /*
43538diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43539index debdfe0..75d31d4 100644
43540--- a/fs/compat_ioctl.c
43541+++ b/fs/compat_ioctl.c
43542@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43543
43544 err = get_user(palp, &up->palette);
43545 err |= get_user(length, &up->length);
43546+ if (err)
43547+ return -EFAULT;
43548
43549 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43550 err = put_user(compat_ptr(palp), &up_native->palette);
43551@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43552 return -EFAULT;
43553 if (__get_user(udata, &ss32->iomem_base))
43554 return -EFAULT;
43555- ss.iomem_base = compat_ptr(udata);
43556+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43557 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43558 __get_user(ss.port_high, &ss32->port_high))
43559 return -EFAULT;
43560@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43561 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43562 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43563 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43564- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43565+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43566 return -EFAULT;
43567
43568 return ioctl_preallocate(file, p);
43569@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43570 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43571 {
43572 unsigned int a, b;
43573- a = *(unsigned int *)p;
43574- b = *(unsigned int *)q;
43575+ a = *(const unsigned int *)p;
43576+ b = *(const unsigned int *)q;
43577 if (a > b)
43578 return 1;
43579 if (a < b)
43580diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43581index 7e6c52d..94bc756 100644
43582--- a/fs/configfs/dir.c
43583+++ b/fs/configfs/dir.c
43584@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43585 }
43586 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43587 struct configfs_dirent *next;
43588- const char * name;
43589+ const unsigned char * name;
43590+ char d_name[sizeof(next->s_dentry->d_iname)];
43591 int len;
43592 struct inode *inode = NULL;
43593
43594@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43595 continue;
43596
43597 name = configfs_get_name(next);
43598- len = strlen(name);
43599+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43600+ len = next->s_dentry->d_name.len;
43601+ memcpy(d_name, name, len);
43602+ name = d_name;
43603+ } else
43604+ len = strlen(name);
43605
43606 /*
43607 * We'll have a dentry and an inode for
43608diff --git a/fs/dcache.c b/fs/dcache.c
43609index b80531c..8ca7e2d 100644
43610--- a/fs/dcache.c
43611+++ b/fs/dcache.c
43612@@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43613 mempages -= reserve;
43614
43615 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43616- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43617+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43618
43619 dcache_init();
43620 inode_init();
43621diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43622index b80bc84..0d46d1a 100644
43623--- a/fs/debugfs/inode.c
43624+++ b/fs/debugfs/inode.c
43625@@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43626 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43627 {
43628 return debugfs_create_file(name,
43629+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43630+ S_IFDIR | S_IRWXU,
43631+#else
43632 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43633+#endif
43634 parent, NULL, NULL);
43635 }
43636 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43637diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43638index ab35b11..b30af66 100644
43639--- a/fs/ecryptfs/inode.c
43640+++ b/fs/ecryptfs/inode.c
43641@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43642 old_fs = get_fs();
43643 set_fs(get_ds());
43644 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43645- (char __user *)lower_buf,
43646+ (char __force_user *)lower_buf,
43647 lower_bufsiz);
43648 set_fs(old_fs);
43649 if (rc < 0)
43650@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43651 }
43652 old_fs = get_fs();
43653 set_fs(get_ds());
43654- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43655+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43656 set_fs(old_fs);
43657 if (rc < 0) {
43658 kfree(buf);
43659@@ -733,7 +733,7 @@ out:
43660 static void
43661 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43662 {
43663- char *buf = nd_get_link(nd);
43664+ const char *buf = nd_get_link(nd);
43665 if (!IS_ERR(buf)) {
43666 /* Free the char* */
43667 kfree(buf);
43668diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43669index c0038f6..47ab347 100644
43670--- a/fs/ecryptfs/miscdev.c
43671+++ b/fs/ecryptfs/miscdev.c
43672@@ -355,7 +355,7 @@ check_list:
43673 goto out_unlock_msg_ctx;
43674 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43675 if (msg_ctx->msg) {
43676- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43677+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43678 goto out_unlock_msg_ctx;
43679 i += packet_length_size;
43680 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43681diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43682index b2a34a1..162fa69 100644
43683--- a/fs/ecryptfs/read_write.c
43684+++ b/fs/ecryptfs/read_write.c
43685@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43686 return -EIO;
43687 fs_save = get_fs();
43688 set_fs(get_ds());
43689- rc = vfs_write(lower_file, data, size, &offset);
43690+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43691 set_fs(fs_save);
43692 mark_inode_dirty_sync(ecryptfs_inode);
43693 return rc;
43694@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43695 return -EIO;
43696 fs_save = get_fs();
43697 set_fs(get_ds());
43698- rc = vfs_read(lower_file, data, size, &offset);
43699+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43700 set_fs(fs_save);
43701 return rc;
43702 }
43703diff --git a/fs/exec.c b/fs/exec.c
43704index 29e5f84..8bfc7cb 100644
43705--- a/fs/exec.c
43706+++ b/fs/exec.c
43707@@ -55,6 +55,15 @@
43708 #include <linux/pipe_fs_i.h>
43709 #include <linux/oom.h>
43710 #include <linux/compat.h>
43711+#include <linux/random.h>
43712+#include <linux/seq_file.h>
43713+
43714+#ifdef CONFIG_PAX_REFCOUNT
43715+#include <linux/kallsyms.h>
43716+#include <linux/kdebug.h>
43717+#endif
43718+
43719+#include <trace/events/fs.h>
43720
43721 #include <asm/uaccess.h>
43722 #include <asm/mmu_context.h>
43723@@ -66,6 +75,18 @@
43724
43725 #include <trace/events/sched.h>
43726
43727+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43728+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43729+{
43730+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43731+}
43732+#endif
43733+
43734+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43735+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43736+EXPORT_SYMBOL(pax_set_initial_flags_func);
43737+#endif
43738+
43739 int core_uses_pid;
43740 char core_pattern[CORENAME_MAX_SIZE] = "core";
43741 unsigned int core_pipe_limit;
43742@@ -75,7 +96,7 @@ struct core_name {
43743 char *corename;
43744 int used, size;
43745 };
43746-static atomic_t call_count = ATOMIC_INIT(1);
43747+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43748
43749 /* The maximal length of core_pattern is also specified in sysctl.c */
43750
43751@@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43752 int write)
43753 {
43754 struct page *page;
43755- int ret;
43756
43757-#ifdef CONFIG_STACK_GROWSUP
43758- if (write) {
43759- ret = expand_downwards(bprm->vma, pos);
43760- if (ret < 0)
43761- return NULL;
43762- }
43763-#endif
43764- ret = get_user_pages(current, bprm->mm, pos,
43765- 1, write, 1, &page, NULL);
43766- if (ret <= 0)
43767+ if (0 > expand_downwards(bprm->vma, pos))
43768+ return NULL;
43769+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43770 return NULL;
43771
43772 if (write) {
43773@@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43774 if (size <= ARG_MAX)
43775 return page;
43776
43777+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43778+ // only allow 512KB for argv+env on suid/sgid binaries
43779+ // to prevent easy ASLR exhaustion
43780+ if (((bprm->cred->euid != current_euid()) ||
43781+ (bprm->cred->egid != current_egid())) &&
43782+ (size > (512 * 1024))) {
43783+ put_page(page);
43784+ return NULL;
43785+ }
43786+#endif
43787+
43788 /*
43789 * Limit to 1/4-th the stack size for the argv+env strings.
43790 * This ensures that:
43791@@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43792 vma->vm_end = STACK_TOP_MAX;
43793 vma->vm_start = vma->vm_end - PAGE_SIZE;
43794 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43795+
43796+#ifdef CONFIG_PAX_SEGMEXEC
43797+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43798+#endif
43799+
43800 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43801 INIT_LIST_HEAD(&vma->anon_vma_chain);
43802
43803@@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43804 mm->stack_vm = mm->total_vm = 1;
43805 up_write(&mm->mmap_sem);
43806 bprm->p = vma->vm_end - sizeof(void *);
43807+
43808+#ifdef CONFIG_PAX_RANDUSTACK
43809+ if (randomize_va_space)
43810+ bprm->p ^= random32() & ~PAGE_MASK;
43811+#endif
43812+
43813 return 0;
43814 err:
43815 up_write(&mm->mmap_sem);
43816@@ -399,19 +434,7 @@ err:
43817 return err;
43818 }
43819
43820-struct user_arg_ptr {
43821-#ifdef CONFIG_COMPAT
43822- bool is_compat;
43823-#endif
43824- union {
43825- const char __user *const __user *native;
43826-#ifdef CONFIG_COMPAT
43827- compat_uptr_t __user *compat;
43828-#endif
43829- } ptr;
43830-};
43831-
43832-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43833+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43834 {
43835 const char __user *native;
43836
43837@@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43838 compat_uptr_t compat;
43839
43840 if (get_user(compat, argv.ptr.compat + nr))
43841- return ERR_PTR(-EFAULT);
43842+ return (const char __force_user *)ERR_PTR(-EFAULT);
43843
43844 return compat_ptr(compat);
43845 }
43846 #endif
43847
43848 if (get_user(native, argv.ptr.native + nr))
43849- return ERR_PTR(-EFAULT);
43850+ return (const char __force_user *)ERR_PTR(-EFAULT);
43851
43852 return native;
43853 }
43854@@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43855 if (!p)
43856 break;
43857
43858- if (IS_ERR(p))
43859+ if (IS_ERR((const char __force_kernel *)p))
43860 return -EFAULT;
43861
43862 if (i++ >= max)
43863@@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43864
43865 ret = -EFAULT;
43866 str = get_user_arg_ptr(argv, argc);
43867- if (IS_ERR(str))
43868+ if (IS_ERR((const char __force_kernel *)str))
43869 goto out;
43870
43871 len = strnlen_user(str, MAX_ARG_STRLEN);
43872@@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43873 int r;
43874 mm_segment_t oldfs = get_fs();
43875 struct user_arg_ptr argv = {
43876- .ptr.native = (const char __user *const __user *)__argv,
43877+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43878 };
43879
43880 set_fs(KERNEL_DS);
43881@@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43882 unsigned long new_end = old_end - shift;
43883 struct mmu_gather tlb;
43884
43885- BUG_ON(new_start > new_end);
43886+ if (new_start >= new_end || new_start < mmap_min_addr)
43887+ return -ENOMEM;
43888
43889 /*
43890 * ensure there are no vmas between where we want to go
43891@@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43892 if (vma != find_vma(mm, new_start))
43893 return -EFAULT;
43894
43895+#ifdef CONFIG_PAX_SEGMEXEC
43896+ BUG_ON(pax_find_mirror_vma(vma));
43897+#endif
43898+
43899 /*
43900 * cover the whole range: [new_start, old_end)
43901 */
43902@@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43903 stack_top = arch_align_stack(stack_top);
43904 stack_top = PAGE_ALIGN(stack_top);
43905
43906- if (unlikely(stack_top < mmap_min_addr) ||
43907- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43908- return -ENOMEM;
43909-
43910 stack_shift = vma->vm_end - stack_top;
43911
43912 bprm->p -= stack_shift;
43913@@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43914 bprm->exec -= stack_shift;
43915
43916 down_write(&mm->mmap_sem);
43917+
43918+ /* Move stack pages down in memory. */
43919+ if (stack_shift) {
43920+ ret = shift_arg_pages(vma, stack_shift);
43921+ if (ret)
43922+ goto out_unlock;
43923+ }
43924+
43925 vm_flags = VM_STACK_FLAGS;
43926
43927+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43928+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43929+ vm_flags &= ~VM_EXEC;
43930+
43931+#ifdef CONFIG_PAX_MPROTECT
43932+ if (mm->pax_flags & MF_PAX_MPROTECT)
43933+ vm_flags &= ~VM_MAYEXEC;
43934+#endif
43935+
43936+ }
43937+#endif
43938+
43939 /*
43940 * Adjust stack execute permissions; explicitly enable for
43941 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43942@@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43943 goto out_unlock;
43944 BUG_ON(prev != vma);
43945
43946- /* Move stack pages down in memory. */
43947- if (stack_shift) {
43948- ret = shift_arg_pages(vma, stack_shift);
43949- if (ret)
43950- goto out_unlock;
43951- }
43952-
43953 /* mprotect_fixup is overkill to remove the temporary stack flags */
43954 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43955
43956@@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43957
43958 fsnotify_open(file);
43959
43960+ trace_open_exec(name);
43961+
43962 err = deny_write_access(file);
43963 if (err)
43964 goto exit;
43965@@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43966 old_fs = get_fs();
43967 set_fs(get_ds());
43968 /* The cast to a user pointer is valid due to the set_fs() */
43969- result = vfs_read(file, (void __user *)addr, count, &pos);
43970+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43971 set_fs(old_fs);
43972 return result;
43973 }
43974@@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43975 }
43976 rcu_read_unlock();
43977
43978- if (p->fs->users > n_fs) {
43979+ if (atomic_read(&p->fs->users) > n_fs) {
43980 bprm->unsafe |= LSM_UNSAFE_SHARE;
43981 } else {
43982 res = -EAGAIN;
43983@@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43984
43985 EXPORT_SYMBOL(search_binary_handler);
43986
43987+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43988+static DEFINE_PER_CPU(u64, exec_counter);
43989+static int __init init_exec_counters(void)
43990+{
43991+ unsigned int cpu;
43992+
43993+ for_each_possible_cpu(cpu) {
43994+ per_cpu(exec_counter, cpu) = (u64)cpu;
43995+ }
43996+
43997+ return 0;
43998+}
43999+early_initcall(init_exec_counters);
44000+static inline void increment_exec_counter(void)
44001+{
44002+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
44003+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44004+}
44005+#else
44006+static inline void increment_exec_counter(void) {}
44007+#endif
44008+
44009 /*
44010 * sys_execve() executes a new program.
44011 */
44012@@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
44013 struct user_arg_ptr envp,
44014 struct pt_regs *regs)
44015 {
44016+#ifdef CONFIG_GRKERNSEC
44017+ struct file *old_exec_file;
44018+ struct acl_subject_label *old_acl;
44019+ struct rlimit old_rlim[RLIM_NLIMITS];
44020+#endif
44021 struct linux_binprm *bprm;
44022 struct file *file;
44023 struct files_struct *displaced;
44024@@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
44025 int retval;
44026 const struct cred *cred = current_cred();
44027
44028+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44029+
44030 /*
44031 * We move the actual failure in case of RLIMIT_NPROC excess from
44032 * set*uid() to execve() because too many poorly written programs
44033@@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
44034 if (IS_ERR(file))
44035 goto out_unmark;
44036
44037+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
44038+ retval = -EPERM;
44039+ goto out_file;
44040+ }
44041+
44042 sched_exec();
44043
44044 bprm->file = file;
44045 bprm->filename = filename;
44046 bprm->interp = filename;
44047
44048+ if (gr_process_user_ban()) {
44049+ retval = -EPERM;
44050+ goto out_file;
44051+ }
44052+
44053+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44054+ retval = -EACCES;
44055+ goto out_file;
44056+ }
44057+
44058 retval = bprm_mm_init(bprm);
44059 if (retval)
44060 goto out_file;
44061@@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
44062 if (retval < 0)
44063 goto out;
44064
44065+#ifdef CONFIG_GRKERNSEC
44066+ old_acl = current->acl;
44067+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44068+ old_exec_file = current->exec_file;
44069+ get_file(file);
44070+ current->exec_file = file;
44071+#endif
44072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44073+ /* limit suid stack to 8MB
44074+ we saved the old limits above and will restore them if this exec fails
44075+ */
44076+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44077+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44078+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44079+#endif
44080+
44081+ if (!gr_tpe_allow(file)) {
44082+ retval = -EACCES;
44083+ goto out_fail;
44084+ }
44085+
44086+ if (gr_check_crash_exec(file)) {
44087+ retval = -EACCES;
44088+ goto out_fail;
44089+ }
44090+
44091+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44092+ bprm->unsafe);
44093+ if (retval < 0)
44094+ goto out_fail;
44095+
44096 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44097 if (retval < 0)
44098- goto out;
44099+ goto out_fail;
44100
44101 bprm->exec = bprm->p;
44102 retval = copy_strings(bprm->envc, envp, bprm);
44103 if (retval < 0)
44104- goto out;
44105+ goto out_fail;
44106
44107 retval = copy_strings(bprm->argc, argv, bprm);
44108 if (retval < 0)
44109- goto out;
44110+ goto out_fail;
44111+
44112+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44113+
44114+ gr_handle_exec_args(bprm, argv);
44115
44116 retval = search_binary_handler(bprm,regs);
44117 if (retval < 0)
44118- goto out;
44119+ goto out_fail;
44120+#ifdef CONFIG_GRKERNSEC
44121+ if (old_exec_file)
44122+ fput(old_exec_file);
44123+#endif
44124
44125 /* execve succeeded */
44126+
44127+ increment_exec_counter();
44128 current->fs->in_exec = 0;
44129 current->in_execve = 0;
44130 acct_update_integrals(current);
44131@@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
44132 put_files_struct(displaced);
44133 return retval;
44134
44135+out_fail:
44136+#ifdef CONFIG_GRKERNSEC
44137+ current->acl = old_acl;
44138+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44139+ fput(current->exec_file);
44140+ current->exec_file = old_exec_file;
44141+#endif
44142+
44143 out:
44144 if (bprm->mm) {
44145 acct_arg_size(bprm, 0);
44146@@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
44147 {
44148 char *old_corename = cn->corename;
44149
44150- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44151+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44152 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44153
44154 if (!cn->corename) {
44155@@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
44156 int pid_in_pattern = 0;
44157 int err = 0;
44158
44159- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44160+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44161 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44162 cn->used = 0;
44163
44164@@ -1821,6 +1953,250 @@ out:
44165 return ispipe;
44166 }
44167
44168+int pax_check_flags(unsigned long *flags)
44169+{
44170+ int retval = 0;
44171+
44172+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44173+ if (*flags & MF_PAX_SEGMEXEC)
44174+ {
44175+ *flags &= ~MF_PAX_SEGMEXEC;
44176+ retval = -EINVAL;
44177+ }
44178+#endif
44179+
44180+ if ((*flags & MF_PAX_PAGEEXEC)
44181+
44182+#ifdef CONFIG_PAX_PAGEEXEC
44183+ && (*flags & MF_PAX_SEGMEXEC)
44184+#endif
44185+
44186+ )
44187+ {
44188+ *flags &= ~MF_PAX_PAGEEXEC;
44189+ retval = -EINVAL;
44190+ }
44191+
44192+ if ((*flags & MF_PAX_MPROTECT)
44193+
44194+#ifdef CONFIG_PAX_MPROTECT
44195+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44196+#endif
44197+
44198+ )
44199+ {
44200+ *flags &= ~MF_PAX_MPROTECT;
44201+ retval = -EINVAL;
44202+ }
44203+
44204+ if ((*flags & MF_PAX_EMUTRAMP)
44205+
44206+#ifdef CONFIG_PAX_EMUTRAMP
44207+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44208+#endif
44209+
44210+ )
44211+ {
44212+ *flags &= ~MF_PAX_EMUTRAMP;
44213+ retval = -EINVAL;
44214+ }
44215+
44216+ return retval;
44217+}
44218+
44219+EXPORT_SYMBOL(pax_check_flags);
44220+
44221+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44222+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44223+{
44224+ struct task_struct *tsk = current;
44225+ struct mm_struct *mm = current->mm;
44226+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44227+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44228+ char *path_exec = NULL;
44229+ char *path_fault = NULL;
44230+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44231+
44232+ if (buffer_exec && buffer_fault) {
44233+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44234+
44235+ down_read(&mm->mmap_sem);
44236+ vma = mm->mmap;
44237+ while (vma && (!vma_exec || !vma_fault)) {
44238+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44239+ vma_exec = vma;
44240+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44241+ vma_fault = vma;
44242+ vma = vma->vm_next;
44243+ }
44244+ if (vma_exec) {
44245+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44246+ if (IS_ERR(path_exec))
44247+ path_exec = "<path too long>";
44248+ else {
44249+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44250+ if (path_exec) {
44251+ *path_exec = 0;
44252+ path_exec = buffer_exec;
44253+ } else
44254+ path_exec = "<path too long>";
44255+ }
44256+ }
44257+ if (vma_fault) {
44258+ start = vma_fault->vm_start;
44259+ end = vma_fault->vm_end;
44260+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44261+ if (vma_fault->vm_file) {
44262+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44263+ if (IS_ERR(path_fault))
44264+ path_fault = "<path too long>";
44265+ else {
44266+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44267+ if (path_fault) {
44268+ *path_fault = 0;
44269+ path_fault = buffer_fault;
44270+ } else
44271+ path_fault = "<path too long>";
44272+ }
44273+ } else
44274+ path_fault = "<anonymous mapping>";
44275+ }
44276+ up_read(&mm->mmap_sem);
44277+ }
44278+ if (tsk->signal->curr_ip)
44279+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44280+ else
44281+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44282+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44283+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44284+ task_uid(tsk), task_euid(tsk), pc, sp);
44285+ free_page((unsigned long)buffer_exec);
44286+ free_page((unsigned long)buffer_fault);
44287+ pax_report_insns(regs, pc, sp);
44288+ do_coredump(SIGKILL, SIGKILL, regs);
44289+}
44290+#endif
44291+
44292+#ifdef CONFIG_PAX_REFCOUNT
44293+void pax_report_refcount_overflow(struct pt_regs *regs)
44294+{
44295+ if (current->signal->curr_ip)
44296+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44297+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44298+ else
44299+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44300+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44301+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44302+ show_regs(regs);
44303+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44304+}
44305+#endif
44306+
44307+#ifdef CONFIG_PAX_USERCOPY
44308+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44309+static noinline int check_stack_object(const void *obj, unsigned long len)
44310+{
44311+ const void * const stack = task_stack_page(current);
44312+ const void * const stackend = stack + THREAD_SIZE;
44313+
44314+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44315+ const void *frame = NULL;
44316+ const void *oldframe;
44317+#endif
44318+
44319+ if (obj + len < obj)
44320+ return -1;
44321+
44322+ if (obj + len <= stack || stackend <= obj)
44323+ return 0;
44324+
44325+ if (obj < stack || stackend < obj + len)
44326+ return -1;
44327+
44328+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44329+ oldframe = __builtin_frame_address(1);
44330+ if (oldframe)
44331+ frame = __builtin_frame_address(2);
44332+ /*
44333+ low ----------------------------------------------> high
44334+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44335+ ^----------------^
44336+ allow copies only within here
44337+ */
44338+ while (stack <= frame && frame < stackend) {
44339+ /* if obj + len extends past the last frame, this
44340+ check won't pass and the next frame will be 0,
44341+ causing us to bail out and correctly report
44342+ the copy as invalid
44343+ */
44344+ if (obj + len <= frame)
44345+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44346+ oldframe = frame;
44347+ frame = *(const void * const *)frame;
44348+ }
44349+ return -1;
44350+#else
44351+ return 1;
44352+#endif
44353+}
44354+
44355+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44356+{
44357+ if (current->signal->curr_ip)
44358+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44359+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44360+ else
44361+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44362+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44363+ dump_stack();
44364+ gr_handle_kernel_exploit();
44365+ do_group_exit(SIGKILL);
44366+}
44367+#endif
44368+
44369+void check_object_size(const void *ptr, unsigned long n, bool to)
44370+{
44371+
44372+#ifdef CONFIG_PAX_USERCOPY
44373+ const char *type;
44374+
44375+ if (!n)
44376+ return;
44377+
44378+ type = check_heap_object(ptr, n, to);
44379+ if (!type) {
44380+ if (check_stack_object(ptr, n) != -1)
44381+ return;
44382+ type = "<process stack>";
44383+ }
44384+
44385+ pax_report_usercopy(ptr, n, to, type);
44386+#endif
44387+
44388+}
44389+EXPORT_SYMBOL(check_object_size);
44390+
44391+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44392+void pax_track_stack(void)
44393+{
44394+ unsigned long sp = (unsigned long)&sp;
44395+ if (sp < current_thread_info()->lowest_stack &&
44396+ sp > (unsigned long)task_stack_page(current))
44397+ current_thread_info()->lowest_stack = sp;
44398+}
44399+EXPORT_SYMBOL(pax_track_stack);
44400+#endif
44401+
44402+#ifdef CONFIG_PAX_SIZE_OVERFLOW
44403+void report_size_overflow(const char *file, unsigned int line, const char *func)
44404+{
44405+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44406+ dump_stack();
44407+ do_group_exit(SIGKILL);
44408+}
44409+EXPORT_SYMBOL(report_size_overflow);
44410+#endif
44411+
44412 static int zap_process(struct task_struct *start, int exit_code)
44413 {
44414 struct task_struct *t;
44415@@ -2018,17 +2394,17 @@ static void wait_for_dump_helpers(struct file *file)
44416 pipe = file->f_path.dentry->d_inode->i_pipe;
44417
44418 pipe_lock(pipe);
44419- pipe->readers++;
44420- pipe->writers--;
44421+ atomic_inc(&pipe->readers);
44422+ atomic_dec(&pipe->writers);
44423
44424- while ((pipe->readers > 1) && (!signal_pending(current))) {
44425+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44426 wake_up_interruptible_sync(&pipe->wait);
44427 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44428 pipe_wait(pipe);
44429 }
44430
44431- pipe->readers--;
44432- pipe->writers++;
44433+ atomic_dec(&pipe->readers);
44434+ atomic_inc(&pipe->writers);
44435 pipe_unlock(pipe);
44436
44437 }
44438@@ -2089,7 +2465,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44439 int retval = 0;
44440 int flag = 0;
44441 int ispipe;
44442- static atomic_t core_dump_count = ATOMIC_INIT(0);
44443+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44444 struct coredump_params cprm = {
44445 .signr = signr,
44446 .regs = regs,
44447@@ -2104,6 +2480,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44448
44449 audit_core_dumps(signr);
44450
44451+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44452+ gr_handle_brute_attach(current, cprm.mm_flags);
44453+
44454 binfmt = mm->binfmt;
44455 if (!binfmt || !binfmt->core_dump)
44456 goto fail;
44457@@ -2171,7 +2550,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44458 }
44459 cprm.limit = RLIM_INFINITY;
44460
44461- dump_count = atomic_inc_return(&core_dump_count);
44462+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44463 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44464 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44465 task_tgid_vnr(current), current->comm);
44466@@ -2198,6 +2577,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44467 } else {
44468 struct inode *inode;
44469
44470+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44471+
44472 if (cprm.limit < binfmt->min_coredump)
44473 goto fail_unlock;
44474
44475@@ -2241,7 +2622,7 @@ close_fail:
44476 filp_close(cprm.file, NULL);
44477 fail_dropcount:
44478 if (ispipe)
44479- atomic_dec(&core_dump_count);
44480+ atomic_dec_unchecked(&core_dump_count);
44481 fail_unlock:
44482 kfree(cn.corename);
44483 fail_corename:
44484@@ -2260,7 +2641,7 @@ fail:
44485 */
44486 int dump_write(struct file *file, const void *addr, int nr)
44487 {
44488- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44489+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44490 }
44491 EXPORT_SYMBOL(dump_write);
44492
44493diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44494index a8cbe1b..fed04cb 100644
44495--- a/fs/ext2/balloc.c
44496+++ b/fs/ext2/balloc.c
44497@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44498
44499 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44500 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44501- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44502+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44503 sbi->s_resuid != current_fsuid() &&
44504 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44505 return 0;
44506diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44507index baac1b1..1499b62 100644
44508--- a/fs/ext3/balloc.c
44509+++ b/fs/ext3/balloc.c
44510@@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44511
44512 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44513 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44514- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44515+ if (free_blocks < root_blocks + 1 &&
44516 !use_reservation && sbi->s_resuid != current_fsuid() &&
44517- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44518+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44519+ !capable_nolog(CAP_SYS_RESOURCE)) {
44520 return 0;
44521 }
44522 return 1;
44523diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44524index 8da837b..ed3835b 100644
44525--- a/fs/ext4/balloc.c
44526+++ b/fs/ext4/balloc.c
44527@@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44528 /* Hm, nope. Are (enough) root reserved clusters available? */
44529 if (sbi->s_resuid == current_fsuid() ||
44530 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44531- capable(CAP_SYS_RESOURCE) ||
44532- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44533+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44534+ capable_nolog(CAP_SYS_RESOURCE)) {
44535
44536 if (free_clusters >= (nclusters + dirty_clusters))
44537 return 1;
44538diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44539index 0e01e90..ae2bd5e 100644
44540--- a/fs/ext4/ext4.h
44541+++ b/fs/ext4/ext4.h
44542@@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44543 unsigned long s_mb_last_start;
44544
44545 /* stats for buddy allocator */
44546- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44547- atomic_t s_bal_success; /* we found long enough chunks */
44548- atomic_t s_bal_allocated; /* in blocks */
44549- atomic_t s_bal_ex_scanned; /* total extents scanned */
44550- atomic_t s_bal_goals; /* goal hits */
44551- atomic_t s_bal_breaks; /* too long searches */
44552- atomic_t s_bal_2orders; /* 2^order hits */
44553+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44554+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44555+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44556+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44557+ atomic_unchecked_t s_bal_goals; /* goal hits */
44558+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44559+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44560 spinlock_t s_bal_lock;
44561 unsigned long s_mb_buddies_generated;
44562 unsigned long long s_mb_generation_time;
44563- atomic_t s_mb_lost_chunks;
44564- atomic_t s_mb_preallocated;
44565- atomic_t s_mb_discarded;
44566+ atomic_unchecked_t s_mb_lost_chunks;
44567+ atomic_unchecked_t s_mb_preallocated;
44568+ atomic_unchecked_t s_mb_discarded;
44569 atomic_t s_lock_busy;
44570
44571 /* locality groups */
44572diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44573index 1365903..9727522 100644
44574--- a/fs/ext4/ioctl.c
44575+++ b/fs/ext4/ioctl.c
44576@@ -261,7 +261,6 @@ group_extend_out:
44577 err = ext4_move_extents(filp, donor_filp, me.orig_start,
44578 me.donor_start, me.len, &me.moved_len);
44579 mnt_drop_write_file(filp);
44580- mnt_drop_write(filp->f_path.mnt);
44581
44582 if (copy_to_user((struct move_extent __user *)arg,
44583 &me, sizeof(me)))
44584diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44585index 6b0a57e..1955a44 100644
44586--- a/fs/ext4/mballoc.c
44587+++ b/fs/ext4/mballoc.c
44588@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44589 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44590
44591 if (EXT4_SB(sb)->s_mb_stats)
44592- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44593+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44594
44595 break;
44596 }
44597@@ -2041,7 +2041,7 @@ repeat:
44598 ac->ac_status = AC_STATUS_CONTINUE;
44599 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44600 cr = 3;
44601- atomic_inc(&sbi->s_mb_lost_chunks);
44602+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44603 goto repeat;
44604 }
44605 }
44606@@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44607 if (sbi->s_mb_stats) {
44608 ext4_msg(sb, KERN_INFO,
44609 "mballoc: %u blocks %u reqs (%u success)",
44610- atomic_read(&sbi->s_bal_allocated),
44611- atomic_read(&sbi->s_bal_reqs),
44612- atomic_read(&sbi->s_bal_success));
44613+ atomic_read_unchecked(&sbi->s_bal_allocated),
44614+ atomic_read_unchecked(&sbi->s_bal_reqs),
44615+ atomic_read_unchecked(&sbi->s_bal_success));
44616 ext4_msg(sb, KERN_INFO,
44617 "mballoc: %u extents scanned, %u goal hits, "
44618 "%u 2^N hits, %u breaks, %u lost",
44619- atomic_read(&sbi->s_bal_ex_scanned),
44620- atomic_read(&sbi->s_bal_goals),
44621- atomic_read(&sbi->s_bal_2orders),
44622- atomic_read(&sbi->s_bal_breaks),
44623- atomic_read(&sbi->s_mb_lost_chunks));
44624+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44625+ atomic_read_unchecked(&sbi->s_bal_goals),
44626+ atomic_read_unchecked(&sbi->s_bal_2orders),
44627+ atomic_read_unchecked(&sbi->s_bal_breaks),
44628+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44629 ext4_msg(sb, KERN_INFO,
44630 "mballoc: %lu generated and it took %Lu",
44631 sbi->s_mb_buddies_generated,
44632 sbi->s_mb_generation_time);
44633 ext4_msg(sb, KERN_INFO,
44634 "mballoc: %u preallocated, %u discarded",
44635- atomic_read(&sbi->s_mb_preallocated),
44636- atomic_read(&sbi->s_mb_discarded));
44637+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44638+ atomic_read_unchecked(&sbi->s_mb_discarded));
44639 }
44640
44641 free_percpu(sbi->s_locality_groups);
44642@@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44643 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44644
44645 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44646- atomic_inc(&sbi->s_bal_reqs);
44647- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44648+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44649+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44650 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44651- atomic_inc(&sbi->s_bal_success);
44652- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44653+ atomic_inc_unchecked(&sbi->s_bal_success);
44654+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44655 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44656 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44657- atomic_inc(&sbi->s_bal_goals);
44658+ atomic_inc_unchecked(&sbi->s_bal_goals);
44659 if (ac->ac_found > sbi->s_mb_max_to_scan)
44660- atomic_inc(&sbi->s_bal_breaks);
44661+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44662 }
44663
44664 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44665@@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44666 trace_ext4_mb_new_inode_pa(ac, pa);
44667
44668 ext4_mb_use_inode_pa(ac, pa);
44669- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44670+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44671
44672 ei = EXT4_I(ac->ac_inode);
44673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44674@@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44675 trace_ext4_mb_new_group_pa(ac, pa);
44676
44677 ext4_mb_use_group_pa(ac, pa);
44678- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44679+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44680
44681 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44682 lg = ac->ac_lg;
44683@@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44684 * from the bitmap and continue.
44685 */
44686 }
44687- atomic_add(free, &sbi->s_mb_discarded);
44688+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44689
44690 return err;
44691 }
44692@@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44693 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44694 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44695 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44696- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44697+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44698 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44699
44700 return 0;
44701diff --git a/fs/fcntl.c b/fs/fcntl.c
44702index 75e7c1f..1eb3e4d 100644
44703--- a/fs/fcntl.c
44704+++ b/fs/fcntl.c
44705@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44706 if (err)
44707 return err;
44708
44709+ if (gr_handle_chroot_fowner(pid, type))
44710+ return -ENOENT;
44711+ if (gr_check_protected_task_fowner(pid, type))
44712+ return -EACCES;
44713+
44714 f_modown(filp, pid, type, force);
44715 return 0;
44716 }
44717@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44718
44719 static int f_setown_ex(struct file *filp, unsigned long arg)
44720 {
44721- struct f_owner_ex * __user owner_p = (void * __user)arg;
44722+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44723 struct f_owner_ex owner;
44724 struct pid *pid;
44725 int type;
44726@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44727
44728 static int f_getown_ex(struct file *filp, unsigned long arg)
44729 {
44730- struct f_owner_ex * __user owner_p = (void * __user)arg;
44731+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44732 struct f_owner_ex owner;
44733 int ret = 0;
44734
44735@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44736 switch (cmd) {
44737 case F_DUPFD:
44738 case F_DUPFD_CLOEXEC:
44739+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44740 if (arg >= rlimit(RLIMIT_NOFILE))
44741 break;
44742 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44743diff --git a/fs/fifo.c b/fs/fifo.c
44744index cf6f434..3d7942c 100644
44745--- a/fs/fifo.c
44746+++ b/fs/fifo.c
44747@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44748 */
44749 filp->f_op = &read_pipefifo_fops;
44750 pipe->r_counter++;
44751- if (pipe->readers++ == 0)
44752+ if (atomic_inc_return(&pipe->readers) == 1)
44753 wake_up_partner(inode);
44754
44755- if (!pipe->writers) {
44756+ if (!atomic_read(&pipe->writers)) {
44757 if ((filp->f_flags & O_NONBLOCK)) {
44758 /* suppress POLLHUP until we have
44759 * seen a writer */
44760@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44761 * errno=ENXIO when there is no process reading the FIFO.
44762 */
44763 ret = -ENXIO;
44764- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44765+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44766 goto err;
44767
44768 filp->f_op = &write_pipefifo_fops;
44769 pipe->w_counter++;
44770- if (!pipe->writers++)
44771+ if (atomic_inc_return(&pipe->writers) == 1)
44772 wake_up_partner(inode);
44773
44774- if (!pipe->readers) {
44775+ if (!atomic_read(&pipe->readers)) {
44776 if (wait_for_partner(inode, &pipe->r_counter))
44777 goto err_wr;
44778 }
44779@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44780 */
44781 filp->f_op = &rdwr_pipefifo_fops;
44782
44783- pipe->readers++;
44784- pipe->writers++;
44785+ atomic_inc(&pipe->readers);
44786+ atomic_inc(&pipe->writers);
44787 pipe->r_counter++;
44788 pipe->w_counter++;
44789- if (pipe->readers == 1 || pipe->writers == 1)
44790+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44791 wake_up_partner(inode);
44792 break;
44793
44794@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44795 return 0;
44796
44797 err_rd:
44798- if (!--pipe->readers)
44799+ if (atomic_dec_and_test(&pipe->readers))
44800 wake_up_interruptible(&pipe->wait);
44801 ret = -ERESTARTSYS;
44802 goto err;
44803
44804 err_wr:
44805- if (!--pipe->writers)
44806+ if (atomic_dec_and_test(&pipe->writers))
44807 wake_up_interruptible(&pipe->wait);
44808 ret = -ERESTARTSYS;
44809 goto err;
44810
44811 err:
44812- if (!pipe->readers && !pipe->writers)
44813+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44814 free_pipe_info(inode);
44815
44816 err_nocleanup:
44817diff --git a/fs/file.c b/fs/file.c
44818index ba3f605..fade102 100644
44819--- a/fs/file.c
44820+++ b/fs/file.c
44821@@ -15,6 +15,7 @@
44822 #include <linux/slab.h>
44823 #include <linux/vmalloc.h>
44824 #include <linux/file.h>
44825+#include <linux/security.h>
44826 #include <linux/fdtable.h>
44827 #include <linux/bitops.h>
44828 #include <linux/interrupt.h>
44829@@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44830 * N.B. For clone tasks sharing a files structure, this test
44831 * will limit the total number of files that can be opened.
44832 */
44833+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44834 if (nr >= rlimit(RLIMIT_NOFILE))
44835 return -EMFILE;
44836
44837diff --git a/fs/filesystems.c b/fs/filesystems.c
44838index 96f2428..f5eeb8e 100644
44839--- a/fs/filesystems.c
44840+++ b/fs/filesystems.c
44841@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44842 int len = dot ? dot - name : strlen(name);
44843
44844 fs = __get_fs_type(name, len);
44845+
44846+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44847+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44848+#else
44849 if (!fs && (request_module("%.*s", len, name) == 0))
44850+#endif
44851 fs = __get_fs_type(name, len);
44852
44853 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44854diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44855index e159e68..e7d2a6f 100644
44856--- a/fs/fs_struct.c
44857+++ b/fs/fs_struct.c
44858@@ -4,6 +4,7 @@
44859 #include <linux/path.h>
44860 #include <linux/slab.h>
44861 #include <linux/fs_struct.h>
44862+#include <linux/grsecurity.h>
44863 #include "internal.h"
44864
44865 static inline void path_get_longterm(struct path *path)
44866@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44867 write_seqcount_begin(&fs->seq);
44868 old_root = fs->root;
44869 fs->root = *path;
44870+ gr_set_chroot_entries(current, path);
44871 write_seqcount_end(&fs->seq);
44872 spin_unlock(&fs->lock);
44873 if (old_root.dentry)
44874@@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44875 return 1;
44876 }
44877
44878+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44879+{
44880+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44881+ return 0;
44882+ *p = *new;
44883+
44884+ gr_set_chroot_entries(task, new);
44885+
44886+ return 1;
44887+}
44888+
44889 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44890 {
44891 struct task_struct *g, *p;
44892@@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44893 int hits = 0;
44894 spin_lock(&fs->lock);
44895 write_seqcount_begin(&fs->seq);
44896- hits += replace_path(&fs->root, old_root, new_root);
44897+ hits += replace_root_path(p, &fs->root, old_root, new_root);
44898 hits += replace_path(&fs->pwd, old_root, new_root);
44899 write_seqcount_end(&fs->seq);
44900 while (hits--) {
44901@@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44902 task_lock(tsk);
44903 spin_lock(&fs->lock);
44904 tsk->fs = NULL;
44905- kill = !--fs->users;
44906+ gr_clear_chroot_entries(tsk);
44907+ kill = !atomic_dec_return(&fs->users);
44908 spin_unlock(&fs->lock);
44909 task_unlock(tsk);
44910 if (kill)
44911@@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44912 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44913 /* We don't need to lock fs - think why ;-) */
44914 if (fs) {
44915- fs->users = 1;
44916+ atomic_set(&fs->users, 1);
44917 fs->in_exec = 0;
44918 spin_lock_init(&fs->lock);
44919 seqcount_init(&fs->seq);
44920@@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44921 spin_lock(&old->lock);
44922 fs->root = old->root;
44923 path_get_longterm(&fs->root);
44924+ /* instead of calling gr_set_chroot_entries here,
44925+ we call it from every caller of this function
44926+ */
44927 fs->pwd = old->pwd;
44928 path_get_longterm(&fs->pwd);
44929 spin_unlock(&old->lock);
44930@@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44931
44932 task_lock(current);
44933 spin_lock(&fs->lock);
44934- kill = !--fs->users;
44935+ kill = !atomic_dec_return(&fs->users);
44936 current->fs = new_fs;
44937+ gr_set_chroot_entries(current, &new_fs->root);
44938 spin_unlock(&fs->lock);
44939 task_unlock(current);
44940
44941@@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44942
44943 int current_umask(void)
44944 {
44945- return current->fs->umask;
44946+ return current->fs->umask | gr_acl_umask();
44947 }
44948 EXPORT_SYMBOL(current_umask);
44949
44950 /* to be mentioned only in INIT_TASK */
44951 struct fs_struct init_fs = {
44952- .users = 1,
44953+ .users = ATOMIC_INIT(1),
44954 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44955 .seq = SEQCNT_ZERO,
44956 .umask = 0022,
44957@@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44958 task_lock(current);
44959
44960 spin_lock(&init_fs.lock);
44961- init_fs.users++;
44962+ atomic_inc(&init_fs.users);
44963 spin_unlock(&init_fs.lock);
44964
44965 spin_lock(&fs->lock);
44966 current->fs = &init_fs;
44967- kill = !--fs->users;
44968+ gr_set_chroot_entries(current, &current->fs->root);
44969+ kill = !atomic_dec_return(&fs->users);
44970 spin_unlock(&fs->lock);
44971
44972 task_unlock(current);
44973diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44974index 9905350..02eaec4 100644
44975--- a/fs/fscache/cookie.c
44976+++ b/fs/fscache/cookie.c
44977@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44978 parent ? (char *) parent->def->name : "<no-parent>",
44979 def->name, netfs_data);
44980
44981- fscache_stat(&fscache_n_acquires);
44982+ fscache_stat_unchecked(&fscache_n_acquires);
44983
44984 /* if there's no parent cookie, then we don't create one here either */
44985 if (!parent) {
44986- fscache_stat(&fscache_n_acquires_null);
44987+ fscache_stat_unchecked(&fscache_n_acquires_null);
44988 _leave(" [no parent]");
44989 return NULL;
44990 }
44991@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44992 /* allocate and initialise a cookie */
44993 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44994 if (!cookie) {
44995- fscache_stat(&fscache_n_acquires_oom);
44996+ fscache_stat_unchecked(&fscache_n_acquires_oom);
44997 _leave(" [ENOMEM]");
44998 return NULL;
44999 }
45000@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45001
45002 switch (cookie->def->type) {
45003 case FSCACHE_COOKIE_TYPE_INDEX:
45004- fscache_stat(&fscache_n_cookie_index);
45005+ fscache_stat_unchecked(&fscache_n_cookie_index);
45006 break;
45007 case FSCACHE_COOKIE_TYPE_DATAFILE:
45008- fscache_stat(&fscache_n_cookie_data);
45009+ fscache_stat_unchecked(&fscache_n_cookie_data);
45010 break;
45011 default:
45012- fscache_stat(&fscache_n_cookie_special);
45013+ fscache_stat_unchecked(&fscache_n_cookie_special);
45014 break;
45015 }
45016
45017@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45018 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45019 atomic_dec(&parent->n_children);
45020 __fscache_cookie_put(cookie);
45021- fscache_stat(&fscache_n_acquires_nobufs);
45022+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45023 _leave(" = NULL");
45024 return NULL;
45025 }
45026 }
45027
45028- fscache_stat(&fscache_n_acquires_ok);
45029+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45030 _leave(" = %p", cookie);
45031 return cookie;
45032 }
45033@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45034 cache = fscache_select_cache_for_object(cookie->parent);
45035 if (!cache) {
45036 up_read(&fscache_addremove_sem);
45037- fscache_stat(&fscache_n_acquires_no_cache);
45038+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45039 _leave(" = -ENOMEDIUM [no cache]");
45040 return -ENOMEDIUM;
45041 }
45042@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45043 object = cache->ops->alloc_object(cache, cookie);
45044 fscache_stat_d(&fscache_n_cop_alloc_object);
45045 if (IS_ERR(object)) {
45046- fscache_stat(&fscache_n_object_no_alloc);
45047+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45048 ret = PTR_ERR(object);
45049 goto error;
45050 }
45051
45052- fscache_stat(&fscache_n_object_alloc);
45053+ fscache_stat_unchecked(&fscache_n_object_alloc);
45054
45055 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45056
45057@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45058 struct fscache_object *object;
45059 struct hlist_node *_p;
45060
45061- fscache_stat(&fscache_n_updates);
45062+ fscache_stat_unchecked(&fscache_n_updates);
45063
45064 if (!cookie) {
45065- fscache_stat(&fscache_n_updates_null);
45066+ fscache_stat_unchecked(&fscache_n_updates_null);
45067 _leave(" [no cookie]");
45068 return;
45069 }
45070@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45071 struct fscache_object *object;
45072 unsigned long event;
45073
45074- fscache_stat(&fscache_n_relinquishes);
45075+ fscache_stat_unchecked(&fscache_n_relinquishes);
45076 if (retire)
45077- fscache_stat(&fscache_n_relinquishes_retire);
45078+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45079
45080 if (!cookie) {
45081- fscache_stat(&fscache_n_relinquishes_null);
45082+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45083 _leave(" [no cookie]");
45084 return;
45085 }
45086@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45087
45088 /* wait for the cookie to finish being instantiated (or to fail) */
45089 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45090- fscache_stat(&fscache_n_relinquishes_waitcrt);
45091+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45092 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45093 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45094 }
45095diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45096index f6aad48..88dcf26 100644
45097--- a/fs/fscache/internal.h
45098+++ b/fs/fscache/internal.h
45099@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45100 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45101 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45102
45103-extern atomic_t fscache_n_op_pend;
45104-extern atomic_t fscache_n_op_run;
45105-extern atomic_t fscache_n_op_enqueue;
45106-extern atomic_t fscache_n_op_deferred_release;
45107-extern atomic_t fscache_n_op_release;
45108-extern atomic_t fscache_n_op_gc;
45109-extern atomic_t fscache_n_op_cancelled;
45110-extern atomic_t fscache_n_op_rejected;
45111+extern atomic_unchecked_t fscache_n_op_pend;
45112+extern atomic_unchecked_t fscache_n_op_run;
45113+extern atomic_unchecked_t fscache_n_op_enqueue;
45114+extern atomic_unchecked_t fscache_n_op_deferred_release;
45115+extern atomic_unchecked_t fscache_n_op_release;
45116+extern atomic_unchecked_t fscache_n_op_gc;
45117+extern atomic_unchecked_t fscache_n_op_cancelled;
45118+extern atomic_unchecked_t fscache_n_op_rejected;
45119
45120-extern atomic_t fscache_n_attr_changed;
45121-extern atomic_t fscache_n_attr_changed_ok;
45122-extern atomic_t fscache_n_attr_changed_nobufs;
45123-extern atomic_t fscache_n_attr_changed_nomem;
45124-extern atomic_t fscache_n_attr_changed_calls;
45125+extern atomic_unchecked_t fscache_n_attr_changed;
45126+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45127+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45128+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45129+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45130
45131-extern atomic_t fscache_n_allocs;
45132-extern atomic_t fscache_n_allocs_ok;
45133-extern atomic_t fscache_n_allocs_wait;
45134-extern atomic_t fscache_n_allocs_nobufs;
45135-extern atomic_t fscache_n_allocs_intr;
45136-extern atomic_t fscache_n_allocs_object_dead;
45137-extern atomic_t fscache_n_alloc_ops;
45138-extern atomic_t fscache_n_alloc_op_waits;
45139+extern atomic_unchecked_t fscache_n_allocs;
45140+extern atomic_unchecked_t fscache_n_allocs_ok;
45141+extern atomic_unchecked_t fscache_n_allocs_wait;
45142+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45143+extern atomic_unchecked_t fscache_n_allocs_intr;
45144+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45145+extern atomic_unchecked_t fscache_n_alloc_ops;
45146+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45147
45148-extern atomic_t fscache_n_retrievals;
45149-extern atomic_t fscache_n_retrievals_ok;
45150-extern atomic_t fscache_n_retrievals_wait;
45151-extern atomic_t fscache_n_retrievals_nodata;
45152-extern atomic_t fscache_n_retrievals_nobufs;
45153-extern atomic_t fscache_n_retrievals_intr;
45154-extern atomic_t fscache_n_retrievals_nomem;
45155-extern atomic_t fscache_n_retrievals_object_dead;
45156-extern atomic_t fscache_n_retrieval_ops;
45157-extern atomic_t fscache_n_retrieval_op_waits;
45158+extern atomic_unchecked_t fscache_n_retrievals;
45159+extern atomic_unchecked_t fscache_n_retrievals_ok;
45160+extern atomic_unchecked_t fscache_n_retrievals_wait;
45161+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45162+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45163+extern atomic_unchecked_t fscache_n_retrievals_intr;
45164+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45165+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45166+extern atomic_unchecked_t fscache_n_retrieval_ops;
45167+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45168
45169-extern atomic_t fscache_n_stores;
45170-extern atomic_t fscache_n_stores_ok;
45171-extern atomic_t fscache_n_stores_again;
45172-extern atomic_t fscache_n_stores_nobufs;
45173-extern atomic_t fscache_n_stores_oom;
45174-extern atomic_t fscache_n_store_ops;
45175-extern atomic_t fscache_n_store_calls;
45176-extern atomic_t fscache_n_store_pages;
45177-extern atomic_t fscache_n_store_radix_deletes;
45178-extern atomic_t fscache_n_store_pages_over_limit;
45179+extern atomic_unchecked_t fscache_n_stores;
45180+extern atomic_unchecked_t fscache_n_stores_ok;
45181+extern atomic_unchecked_t fscache_n_stores_again;
45182+extern atomic_unchecked_t fscache_n_stores_nobufs;
45183+extern atomic_unchecked_t fscache_n_stores_oom;
45184+extern atomic_unchecked_t fscache_n_store_ops;
45185+extern atomic_unchecked_t fscache_n_store_calls;
45186+extern atomic_unchecked_t fscache_n_store_pages;
45187+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45188+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45189
45190-extern atomic_t fscache_n_store_vmscan_not_storing;
45191-extern atomic_t fscache_n_store_vmscan_gone;
45192-extern atomic_t fscache_n_store_vmscan_busy;
45193-extern atomic_t fscache_n_store_vmscan_cancelled;
45194+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45195+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45196+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45197+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45198
45199-extern atomic_t fscache_n_marks;
45200-extern atomic_t fscache_n_uncaches;
45201+extern atomic_unchecked_t fscache_n_marks;
45202+extern atomic_unchecked_t fscache_n_uncaches;
45203
45204-extern atomic_t fscache_n_acquires;
45205-extern atomic_t fscache_n_acquires_null;
45206-extern atomic_t fscache_n_acquires_no_cache;
45207-extern atomic_t fscache_n_acquires_ok;
45208-extern atomic_t fscache_n_acquires_nobufs;
45209-extern atomic_t fscache_n_acquires_oom;
45210+extern atomic_unchecked_t fscache_n_acquires;
45211+extern atomic_unchecked_t fscache_n_acquires_null;
45212+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45213+extern atomic_unchecked_t fscache_n_acquires_ok;
45214+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45215+extern atomic_unchecked_t fscache_n_acquires_oom;
45216
45217-extern atomic_t fscache_n_updates;
45218-extern atomic_t fscache_n_updates_null;
45219-extern atomic_t fscache_n_updates_run;
45220+extern atomic_unchecked_t fscache_n_updates;
45221+extern atomic_unchecked_t fscache_n_updates_null;
45222+extern atomic_unchecked_t fscache_n_updates_run;
45223
45224-extern atomic_t fscache_n_relinquishes;
45225-extern atomic_t fscache_n_relinquishes_null;
45226-extern atomic_t fscache_n_relinquishes_waitcrt;
45227-extern atomic_t fscache_n_relinquishes_retire;
45228+extern atomic_unchecked_t fscache_n_relinquishes;
45229+extern atomic_unchecked_t fscache_n_relinquishes_null;
45230+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45231+extern atomic_unchecked_t fscache_n_relinquishes_retire;
45232
45233-extern atomic_t fscache_n_cookie_index;
45234-extern atomic_t fscache_n_cookie_data;
45235-extern atomic_t fscache_n_cookie_special;
45236+extern atomic_unchecked_t fscache_n_cookie_index;
45237+extern atomic_unchecked_t fscache_n_cookie_data;
45238+extern atomic_unchecked_t fscache_n_cookie_special;
45239
45240-extern atomic_t fscache_n_object_alloc;
45241-extern atomic_t fscache_n_object_no_alloc;
45242-extern atomic_t fscache_n_object_lookups;
45243-extern atomic_t fscache_n_object_lookups_negative;
45244-extern atomic_t fscache_n_object_lookups_positive;
45245-extern atomic_t fscache_n_object_lookups_timed_out;
45246-extern atomic_t fscache_n_object_created;
45247-extern atomic_t fscache_n_object_avail;
45248-extern atomic_t fscache_n_object_dead;
45249+extern atomic_unchecked_t fscache_n_object_alloc;
45250+extern atomic_unchecked_t fscache_n_object_no_alloc;
45251+extern atomic_unchecked_t fscache_n_object_lookups;
45252+extern atomic_unchecked_t fscache_n_object_lookups_negative;
45253+extern atomic_unchecked_t fscache_n_object_lookups_positive;
45254+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45255+extern atomic_unchecked_t fscache_n_object_created;
45256+extern atomic_unchecked_t fscache_n_object_avail;
45257+extern atomic_unchecked_t fscache_n_object_dead;
45258
45259-extern atomic_t fscache_n_checkaux_none;
45260-extern atomic_t fscache_n_checkaux_okay;
45261-extern atomic_t fscache_n_checkaux_update;
45262-extern atomic_t fscache_n_checkaux_obsolete;
45263+extern atomic_unchecked_t fscache_n_checkaux_none;
45264+extern atomic_unchecked_t fscache_n_checkaux_okay;
45265+extern atomic_unchecked_t fscache_n_checkaux_update;
45266+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45267
45268 extern atomic_t fscache_n_cop_alloc_object;
45269 extern atomic_t fscache_n_cop_lookup_object;
45270@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45271 atomic_inc(stat);
45272 }
45273
45274+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45275+{
45276+ atomic_inc_unchecked(stat);
45277+}
45278+
45279 static inline void fscache_stat_d(atomic_t *stat)
45280 {
45281 atomic_dec(stat);
45282@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45283
45284 #define __fscache_stat(stat) (NULL)
45285 #define fscache_stat(stat) do {} while (0)
45286+#define fscache_stat_unchecked(stat) do {} while (0)
45287 #define fscache_stat_d(stat) do {} while (0)
45288 #endif
45289
45290diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45291index b6b897c..0ffff9c 100644
45292--- a/fs/fscache/object.c
45293+++ b/fs/fscache/object.c
45294@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45295 /* update the object metadata on disk */
45296 case FSCACHE_OBJECT_UPDATING:
45297 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45298- fscache_stat(&fscache_n_updates_run);
45299+ fscache_stat_unchecked(&fscache_n_updates_run);
45300 fscache_stat(&fscache_n_cop_update_object);
45301 object->cache->ops->update_object(object);
45302 fscache_stat_d(&fscache_n_cop_update_object);
45303@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45304 spin_lock(&object->lock);
45305 object->state = FSCACHE_OBJECT_DEAD;
45306 spin_unlock(&object->lock);
45307- fscache_stat(&fscache_n_object_dead);
45308+ fscache_stat_unchecked(&fscache_n_object_dead);
45309 goto terminal_transit;
45310
45311 /* handle the parent cache of this object being withdrawn from
45312@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45313 spin_lock(&object->lock);
45314 object->state = FSCACHE_OBJECT_DEAD;
45315 spin_unlock(&object->lock);
45316- fscache_stat(&fscache_n_object_dead);
45317+ fscache_stat_unchecked(&fscache_n_object_dead);
45318 goto terminal_transit;
45319
45320 /* complain about the object being woken up once it is
45321@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45322 parent->cookie->def->name, cookie->def->name,
45323 object->cache->tag->name);
45324
45325- fscache_stat(&fscache_n_object_lookups);
45326+ fscache_stat_unchecked(&fscache_n_object_lookups);
45327 fscache_stat(&fscache_n_cop_lookup_object);
45328 ret = object->cache->ops->lookup_object(object);
45329 fscache_stat_d(&fscache_n_cop_lookup_object);
45330@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45331 if (ret == -ETIMEDOUT) {
45332 /* probably stuck behind another object, so move this one to
45333 * the back of the queue */
45334- fscache_stat(&fscache_n_object_lookups_timed_out);
45335+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45336 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45337 }
45338
45339@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45340
45341 spin_lock(&object->lock);
45342 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45343- fscache_stat(&fscache_n_object_lookups_negative);
45344+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45345
45346 /* transit here to allow write requests to begin stacking up
45347 * and read requests to begin returning ENODATA */
45348@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45349 * result, in which case there may be data available */
45350 spin_lock(&object->lock);
45351 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45352- fscache_stat(&fscache_n_object_lookups_positive);
45353+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45354
45355 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45356
45357@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45358 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45359 } else {
45360 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45361- fscache_stat(&fscache_n_object_created);
45362+ fscache_stat_unchecked(&fscache_n_object_created);
45363
45364 object->state = FSCACHE_OBJECT_AVAILABLE;
45365 spin_unlock(&object->lock);
45366@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45367 fscache_enqueue_dependents(object);
45368
45369 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45370- fscache_stat(&fscache_n_object_avail);
45371+ fscache_stat_unchecked(&fscache_n_object_avail);
45372
45373 _leave("");
45374 }
45375@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45376 enum fscache_checkaux result;
45377
45378 if (!object->cookie->def->check_aux) {
45379- fscache_stat(&fscache_n_checkaux_none);
45380+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45381 return FSCACHE_CHECKAUX_OKAY;
45382 }
45383
45384@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45385 switch (result) {
45386 /* entry okay as is */
45387 case FSCACHE_CHECKAUX_OKAY:
45388- fscache_stat(&fscache_n_checkaux_okay);
45389+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45390 break;
45391
45392 /* entry requires update */
45393 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45394- fscache_stat(&fscache_n_checkaux_update);
45395+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45396 break;
45397
45398 /* entry requires deletion */
45399 case FSCACHE_CHECKAUX_OBSOLETE:
45400- fscache_stat(&fscache_n_checkaux_obsolete);
45401+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45402 break;
45403
45404 default:
45405diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45406index 30afdfa..2256596 100644
45407--- a/fs/fscache/operation.c
45408+++ b/fs/fscache/operation.c
45409@@ -17,7 +17,7 @@
45410 #include <linux/slab.h>
45411 #include "internal.h"
45412
45413-atomic_t fscache_op_debug_id;
45414+atomic_unchecked_t fscache_op_debug_id;
45415 EXPORT_SYMBOL(fscache_op_debug_id);
45416
45417 /**
45418@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45419 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45420 ASSERTCMP(atomic_read(&op->usage), >, 0);
45421
45422- fscache_stat(&fscache_n_op_enqueue);
45423+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45424 switch (op->flags & FSCACHE_OP_TYPE) {
45425 case FSCACHE_OP_ASYNC:
45426 _debug("queue async");
45427@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45428 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45429 if (op->processor)
45430 fscache_enqueue_operation(op);
45431- fscache_stat(&fscache_n_op_run);
45432+ fscache_stat_unchecked(&fscache_n_op_run);
45433 }
45434
45435 /*
45436@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45437 if (object->n_ops > 1) {
45438 atomic_inc(&op->usage);
45439 list_add_tail(&op->pend_link, &object->pending_ops);
45440- fscache_stat(&fscache_n_op_pend);
45441+ fscache_stat_unchecked(&fscache_n_op_pend);
45442 } else if (!list_empty(&object->pending_ops)) {
45443 atomic_inc(&op->usage);
45444 list_add_tail(&op->pend_link, &object->pending_ops);
45445- fscache_stat(&fscache_n_op_pend);
45446+ fscache_stat_unchecked(&fscache_n_op_pend);
45447 fscache_start_operations(object);
45448 } else {
45449 ASSERTCMP(object->n_in_progress, ==, 0);
45450@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45451 object->n_exclusive++; /* reads and writes must wait */
45452 atomic_inc(&op->usage);
45453 list_add_tail(&op->pend_link, &object->pending_ops);
45454- fscache_stat(&fscache_n_op_pend);
45455+ fscache_stat_unchecked(&fscache_n_op_pend);
45456 ret = 0;
45457 } else {
45458 /* not allowed to submit ops in any other state */
45459@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45460 if (object->n_exclusive > 0) {
45461 atomic_inc(&op->usage);
45462 list_add_tail(&op->pend_link, &object->pending_ops);
45463- fscache_stat(&fscache_n_op_pend);
45464+ fscache_stat_unchecked(&fscache_n_op_pend);
45465 } else if (!list_empty(&object->pending_ops)) {
45466 atomic_inc(&op->usage);
45467 list_add_tail(&op->pend_link, &object->pending_ops);
45468- fscache_stat(&fscache_n_op_pend);
45469+ fscache_stat_unchecked(&fscache_n_op_pend);
45470 fscache_start_operations(object);
45471 } else {
45472 ASSERTCMP(object->n_exclusive, ==, 0);
45473@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45474 object->n_ops++;
45475 atomic_inc(&op->usage);
45476 list_add_tail(&op->pend_link, &object->pending_ops);
45477- fscache_stat(&fscache_n_op_pend);
45478+ fscache_stat_unchecked(&fscache_n_op_pend);
45479 ret = 0;
45480 } else if (object->state == FSCACHE_OBJECT_DYING ||
45481 object->state == FSCACHE_OBJECT_LC_DYING ||
45482 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45483- fscache_stat(&fscache_n_op_rejected);
45484+ fscache_stat_unchecked(&fscache_n_op_rejected);
45485 ret = -ENOBUFS;
45486 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45487 fscache_report_unexpected_submission(object, op, ostate);
45488@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45489
45490 ret = -EBUSY;
45491 if (!list_empty(&op->pend_link)) {
45492- fscache_stat(&fscache_n_op_cancelled);
45493+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45494 list_del_init(&op->pend_link);
45495 object->n_ops--;
45496 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45497@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45498 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45499 BUG();
45500
45501- fscache_stat(&fscache_n_op_release);
45502+ fscache_stat_unchecked(&fscache_n_op_release);
45503
45504 if (op->release) {
45505 op->release(op);
45506@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45507 * lock, and defer it otherwise */
45508 if (!spin_trylock(&object->lock)) {
45509 _debug("defer put");
45510- fscache_stat(&fscache_n_op_deferred_release);
45511+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45512
45513 cache = object->cache;
45514 spin_lock(&cache->op_gc_list_lock);
45515@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45516
45517 _debug("GC DEFERRED REL OBJ%x OP%x",
45518 object->debug_id, op->debug_id);
45519- fscache_stat(&fscache_n_op_gc);
45520+ fscache_stat_unchecked(&fscache_n_op_gc);
45521
45522 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45523
45524diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45525index 3f7a59b..cf196cc 100644
45526--- a/fs/fscache/page.c
45527+++ b/fs/fscache/page.c
45528@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45529 val = radix_tree_lookup(&cookie->stores, page->index);
45530 if (!val) {
45531 rcu_read_unlock();
45532- fscache_stat(&fscache_n_store_vmscan_not_storing);
45533+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45534 __fscache_uncache_page(cookie, page);
45535 return true;
45536 }
45537@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45538 spin_unlock(&cookie->stores_lock);
45539
45540 if (xpage) {
45541- fscache_stat(&fscache_n_store_vmscan_cancelled);
45542- fscache_stat(&fscache_n_store_radix_deletes);
45543+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45544+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45545 ASSERTCMP(xpage, ==, page);
45546 } else {
45547- fscache_stat(&fscache_n_store_vmscan_gone);
45548+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45549 }
45550
45551 wake_up_bit(&cookie->flags, 0);
45552@@ -107,7 +107,7 @@ page_busy:
45553 /* we might want to wait here, but that could deadlock the allocator as
45554 * the work threads writing to the cache may all end up sleeping
45555 * on memory allocation */
45556- fscache_stat(&fscache_n_store_vmscan_busy);
45557+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45558 return false;
45559 }
45560 EXPORT_SYMBOL(__fscache_maybe_release_page);
45561@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45562 FSCACHE_COOKIE_STORING_TAG);
45563 if (!radix_tree_tag_get(&cookie->stores, page->index,
45564 FSCACHE_COOKIE_PENDING_TAG)) {
45565- fscache_stat(&fscache_n_store_radix_deletes);
45566+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45567 xpage = radix_tree_delete(&cookie->stores, page->index);
45568 }
45569 spin_unlock(&cookie->stores_lock);
45570@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45571
45572 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45573
45574- fscache_stat(&fscache_n_attr_changed_calls);
45575+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45576
45577 if (fscache_object_is_active(object)) {
45578 fscache_stat(&fscache_n_cop_attr_changed);
45579@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45580
45581 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45582
45583- fscache_stat(&fscache_n_attr_changed);
45584+ fscache_stat_unchecked(&fscache_n_attr_changed);
45585
45586 op = kzalloc(sizeof(*op), GFP_KERNEL);
45587 if (!op) {
45588- fscache_stat(&fscache_n_attr_changed_nomem);
45589+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45590 _leave(" = -ENOMEM");
45591 return -ENOMEM;
45592 }
45593@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45594 if (fscache_submit_exclusive_op(object, op) < 0)
45595 goto nobufs;
45596 spin_unlock(&cookie->lock);
45597- fscache_stat(&fscache_n_attr_changed_ok);
45598+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45599 fscache_put_operation(op);
45600 _leave(" = 0");
45601 return 0;
45602@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45603 nobufs:
45604 spin_unlock(&cookie->lock);
45605 kfree(op);
45606- fscache_stat(&fscache_n_attr_changed_nobufs);
45607+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45608 _leave(" = %d", -ENOBUFS);
45609 return -ENOBUFS;
45610 }
45611@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45612 /* allocate a retrieval operation and attempt to submit it */
45613 op = kzalloc(sizeof(*op), GFP_NOIO);
45614 if (!op) {
45615- fscache_stat(&fscache_n_retrievals_nomem);
45616+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45617 return NULL;
45618 }
45619
45620@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45621 return 0;
45622 }
45623
45624- fscache_stat(&fscache_n_retrievals_wait);
45625+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45626
45627 jif = jiffies;
45628 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45629 fscache_wait_bit_interruptible,
45630 TASK_INTERRUPTIBLE) != 0) {
45631- fscache_stat(&fscache_n_retrievals_intr);
45632+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45633 _leave(" = -ERESTARTSYS");
45634 return -ERESTARTSYS;
45635 }
45636@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45637 */
45638 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45639 struct fscache_retrieval *op,
45640- atomic_t *stat_op_waits,
45641- atomic_t *stat_object_dead)
45642+ atomic_unchecked_t *stat_op_waits,
45643+ atomic_unchecked_t *stat_object_dead)
45644 {
45645 int ret;
45646
45647@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45648 goto check_if_dead;
45649
45650 _debug(">>> WT");
45651- fscache_stat(stat_op_waits);
45652+ fscache_stat_unchecked(stat_op_waits);
45653 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45654 fscache_wait_bit_interruptible,
45655 TASK_INTERRUPTIBLE) < 0) {
45656@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45657
45658 check_if_dead:
45659 if (unlikely(fscache_object_is_dead(object))) {
45660- fscache_stat(stat_object_dead);
45661+ fscache_stat_unchecked(stat_object_dead);
45662 return -ENOBUFS;
45663 }
45664 return 0;
45665@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45666
45667 _enter("%p,%p,,,", cookie, page);
45668
45669- fscache_stat(&fscache_n_retrievals);
45670+ fscache_stat_unchecked(&fscache_n_retrievals);
45671
45672 if (hlist_empty(&cookie->backing_objects))
45673 goto nobufs;
45674@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45675 goto nobufs_unlock;
45676 spin_unlock(&cookie->lock);
45677
45678- fscache_stat(&fscache_n_retrieval_ops);
45679+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45680
45681 /* pin the netfs read context in case we need to do the actual netfs
45682 * read because we've encountered a cache read failure */
45683@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45684
45685 error:
45686 if (ret == -ENOMEM)
45687- fscache_stat(&fscache_n_retrievals_nomem);
45688+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45689 else if (ret == -ERESTARTSYS)
45690- fscache_stat(&fscache_n_retrievals_intr);
45691+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45692 else if (ret == -ENODATA)
45693- fscache_stat(&fscache_n_retrievals_nodata);
45694+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45695 else if (ret < 0)
45696- fscache_stat(&fscache_n_retrievals_nobufs);
45697+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45698 else
45699- fscache_stat(&fscache_n_retrievals_ok);
45700+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45701
45702 fscache_put_retrieval(op);
45703 _leave(" = %d", ret);
45704@@ -429,7 +429,7 @@ nobufs_unlock:
45705 spin_unlock(&cookie->lock);
45706 kfree(op);
45707 nobufs:
45708- fscache_stat(&fscache_n_retrievals_nobufs);
45709+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45710 _leave(" = -ENOBUFS");
45711 return -ENOBUFS;
45712 }
45713@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45714
45715 _enter("%p,,%d,,,", cookie, *nr_pages);
45716
45717- fscache_stat(&fscache_n_retrievals);
45718+ fscache_stat_unchecked(&fscache_n_retrievals);
45719
45720 if (hlist_empty(&cookie->backing_objects))
45721 goto nobufs;
45722@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45723 goto nobufs_unlock;
45724 spin_unlock(&cookie->lock);
45725
45726- fscache_stat(&fscache_n_retrieval_ops);
45727+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45728
45729 /* pin the netfs read context in case we need to do the actual netfs
45730 * read because we've encountered a cache read failure */
45731@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45732
45733 error:
45734 if (ret == -ENOMEM)
45735- fscache_stat(&fscache_n_retrievals_nomem);
45736+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45737 else if (ret == -ERESTARTSYS)
45738- fscache_stat(&fscache_n_retrievals_intr);
45739+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45740 else if (ret == -ENODATA)
45741- fscache_stat(&fscache_n_retrievals_nodata);
45742+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45743 else if (ret < 0)
45744- fscache_stat(&fscache_n_retrievals_nobufs);
45745+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45746 else
45747- fscache_stat(&fscache_n_retrievals_ok);
45748+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45749
45750 fscache_put_retrieval(op);
45751 _leave(" = %d", ret);
45752@@ -545,7 +545,7 @@ nobufs_unlock:
45753 spin_unlock(&cookie->lock);
45754 kfree(op);
45755 nobufs:
45756- fscache_stat(&fscache_n_retrievals_nobufs);
45757+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45758 _leave(" = -ENOBUFS");
45759 return -ENOBUFS;
45760 }
45761@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45762
45763 _enter("%p,%p,,,", cookie, page);
45764
45765- fscache_stat(&fscache_n_allocs);
45766+ fscache_stat_unchecked(&fscache_n_allocs);
45767
45768 if (hlist_empty(&cookie->backing_objects))
45769 goto nobufs;
45770@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45771 goto nobufs_unlock;
45772 spin_unlock(&cookie->lock);
45773
45774- fscache_stat(&fscache_n_alloc_ops);
45775+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45776
45777 ret = fscache_wait_for_retrieval_activation(
45778 object, op,
45779@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45780
45781 error:
45782 if (ret == -ERESTARTSYS)
45783- fscache_stat(&fscache_n_allocs_intr);
45784+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45785 else if (ret < 0)
45786- fscache_stat(&fscache_n_allocs_nobufs);
45787+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45788 else
45789- fscache_stat(&fscache_n_allocs_ok);
45790+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45791
45792 fscache_put_retrieval(op);
45793 _leave(" = %d", ret);
45794@@ -625,7 +625,7 @@ nobufs_unlock:
45795 spin_unlock(&cookie->lock);
45796 kfree(op);
45797 nobufs:
45798- fscache_stat(&fscache_n_allocs_nobufs);
45799+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45800 _leave(" = -ENOBUFS");
45801 return -ENOBUFS;
45802 }
45803@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45804
45805 spin_lock(&cookie->stores_lock);
45806
45807- fscache_stat(&fscache_n_store_calls);
45808+ fscache_stat_unchecked(&fscache_n_store_calls);
45809
45810 /* find a page to store */
45811 page = NULL;
45812@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45813 page = results[0];
45814 _debug("gang %d [%lx]", n, page->index);
45815 if (page->index > op->store_limit) {
45816- fscache_stat(&fscache_n_store_pages_over_limit);
45817+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45818 goto superseded;
45819 }
45820
45821@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45822 spin_unlock(&cookie->stores_lock);
45823 spin_unlock(&object->lock);
45824
45825- fscache_stat(&fscache_n_store_pages);
45826+ fscache_stat_unchecked(&fscache_n_store_pages);
45827 fscache_stat(&fscache_n_cop_write_page);
45828 ret = object->cache->ops->write_page(op, page);
45829 fscache_stat_d(&fscache_n_cop_write_page);
45830@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45831 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45832 ASSERT(PageFsCache(page));
45833
45834- fscache_stat(&fscache_n_stores);
45835+ fscache_stat_unchecked(&fscache_n_stores);
45836
45837 op = kzalloc(sizeof(*op), GFP_NOIO);
45838 if (!op)
45839@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45840 spin_unlock(&cookie->stores_lock);
45841 spin_unlock(&object->lock);
45842
45843- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45844+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45845 op->store_limit = object->store_limit;
45846
45847 if (fscache_submit_op(object, &op->op) < 0)
45848@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45849
45850 spin_unlock(&cookie->lock);
45851 radix_tree_preload_end();
45852- fscache_stat(&fscache_n_store_ops);
45853- fscache_stat(&fscache_n_stores_ok);
45854+ fscache_stat_unchecked(&fscache_n_store_ops);
45855+ fscache_stat_unchecked(&fscache_n_stores_ok);
45856
45857 /* the work queue now carries its own ref on the object */
45858 fscache_put_operation(&op->op);
45859@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45860 return 0;
45861
45862 already_queued:
45863- fscache_stat(&fscache_n_stores_again);
45864+ fscache_stat_unchecked(&fscache_n_stores_again);
45865 already_pending:
45866 spin_unlock(&cookie->stores_lock);
45867 spin_unlock(&object->lock);
45868 spin_unlock(&cookie->lock);
45869 radix_tree_preload_end();
45870 kfree(op);
45871- fscache_stat(&fscache_n_stores_ok);
45872+ fscache_stat_unchecked(&fscache_n_stores_ok);
45873 _leave(" = 0");
45874 return 0;
45875
45876@@ -851,14 +851,14 @@ nobufs:
45877 spin_unlock(&cookie->lock);
45878 radix_tree_preload_end();
45879 kfree(op);
45880- fscache_stat(&fscache_n_stores_nobufs);
45881+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45882 _leave(" = -ENOBUFS");
45883 return -ENOBUFS;
45884
45885 nomem_free:
45886 kfree(op);
45887 nomem:
45888- fscache_stat(&fscache_n_stores_oom);
45889+ fscache_stat_unchecked(&fscache_n_stores_oom);
45890 _leave(" = -ENOMEM");
45891 return -ENOMEM;
45892 }
45893@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45894 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45895 ASSERTCMP(page, !=, NULL);
45896
45897- fscache_stat(&fscache_n_uncaches);
45898+ fscache_stat_unchecked(&fscache_n_uncaches);
45899
45900 /* cache withdrawal may beat us to it */
45901 if (!PageFsCache(page))
45902@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45903 unsigned long loop;
45904
45905 #ifdef CONFIG_FSCACHE_STATS
45906- atomic_add(pagevec->nr, &fscache_n_marks);
45907+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45908 #endif
45909
45910 for (loop = 0; loop < pagevec->nr; loop++) {
45911diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45912index 4765190..2a067f2 100644
45913--- a/fs/fscache/stats.c
45914+++ b/fs/fscache/stats.c
45915@@ -18,95 +18,95 @@
45916 /*
45917 * operation counters
45918 */
45919-atomic_t fscache_n_op_pend;
45920-atomic_t fscache_n_op_run;
45921-atomic_t fscache_n_op_enqueue;
45922-atomic_t fscache_n_op_requeue;
45923-atomic_t fscache_n_op_deferred_release;
45924-atomic_t fscache_n_op_release;
45925-atomic_t fscache_n_op_gc;
45926-atomic_t fscache_n_op_cancelled;
45927-atomic_t fscache_n_op_rejected;
45928+atomic_unchecked_t fscache_n_op_pend;
45929+atomic_unchecked_t fscache_n_op_run;
45930+atomic_unchecked_t fscache_n_op_enqueue;
45931+atomic_unchecked_t fscache_n_op_requeue;
45932+atomic_unchecked_t fscache_n_op_deferred_release;
45933+atomic_unchecked_t fscache_n_op_release;
45934+atomic_unchecked_t fscache_n_op_gc;
45935+atomic_unchecked_t fscache_n_op_cancelled;
45936+atomic_unchecked_t fscache_n_op_rejected;
45937
45938-atomic_t fscache_n_attr_changed;
45939-atomic_t fscache_n_attr_changed_ok;
45940-atomic_t fscache_n_attr_changed_nobufs;
45941-atomic_t fscache_n_attr_changed_nomem;
45942-atomic_t fscache_n_attr_changed_calls;
45943+atomic_unchecked_t fscache_n_attr_changed;
45944+atomic_unchecked_t fscache_n_attr_changed_ok;
45945+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45946+atomic_unchecked_t fscache_n_attr_changed_nomem;
45947+atomic_unchecked_t fscache_n_attr_changed_calls;
45948
45949-atomic_t fscache_n_allocs;
45950-atomic_t fscache_n_allocs_ok;
45951-atomic_t fscache_n_allocs_wait;
45952-atomic_t fscache_n_allocs_nobufs;
45953-atomic_t fscache_n_allocs_intr;
45954-atomic_t fscache_n_allocs_object_dead;
45955-atomic_t fscache_n_alloc_ops;
45956-atomic_t fscache_n_alloc_op_waits;
45957+atomic_unchecked_t fscache_n_allocs;
45958+atomic_unchecked_t fscache_n_allocs_ok;
45959+atomic_unchecked_t fscache_n_allocs_wait;
45960+atomic_unchecked_t fscache_n_allocs_nobufs;
45961+atomic_unchecked_t fscache_n_allocs_intr;
45962+atomic_unchecked_t fscache_n_allocs_object_dead;
45963+atomic_unchecked_t fscache_n_alloc_ops;
45964+atomic_unchecked_t fscache_n_alloc_op_waits;
45965
45966-atomic_t fscache_n_retrievals;
45967-atomic_t fscache_n_retrievals_ok;
45968-atomic_t fscache_n_retrievals_wait;
45969-atomic_t fscache_n_retrievals_nodata;
45970-atomic_t fscache_n_retrievals_nobufs;
45971-atomic_t fscache_n_retrievals_intr;
45972-atomic_t fscache_n_retrievals_nomem;
45973-atomic_t fscache_n_retrievals_object_dead;
45974-atomic_t fscache_n_retrieval_ops;
45975-atomic_t fscache_n_retrieval_op_waits;
45976+atomic_unchecked_t fscache_n_retrievals;
45977+atomic_unchecked_t fscache_n_retrievals_ok;
45978+atomic_unchecked_t fscache_n_retrievals_wait;
45979+atomic_unchecked_t fscache_n_retrievals_nodata;
45980+atomic_unchecked_t fscache_n_retrievals_nobufs;
45981+atomic_unchecked_t fscache_n_retrievals_intr;
45982+atomic_unchecked_t fscache_n_retrievals_nomem;
45983+atomic_unchecked_t fscache_n_retrievals_object_dead;
45984+atomic_unchecked_t fscache_n_retrieval_ops;
45985+atomic_unchecked_t fscache_n_retrieval_op_waits;
45986
45987-atomic_t fscache_n_stores;
45988-atomic_t fscache_n_stores_ok;
45989-atomic_t fscache_n_stores_again;
45990-atomic_t fscache_n_stores_nobufs;
45991-atomic_t fscache_n_stores_oom;
45992-atomic_t fscache_n_store_ops;
45993-atomic_t fscache_n_store_calls;
45994-atomic_t fscache_n_store_pages;
45995-atomic_t fscache_n_store_radix_deletes;
45996-atomic_t fscache_n_store_pages_over_limit;
45997+atomic_unchecked_t fscache_n_stores;
45998+atomic_unchecked_t fscache_n_stores_ok;
45999+atomic_unchecked_t fscache_n_stores_again;
46000+atomic_unchecked_t fscache_n_stores_nobufs;
46001+atomic_unchecked_t fscache_n_stores_oom;
46002+atomic_unchecked_t fscache_n_store_ops;
46003+atomic_unchecked_t fscache_n_store_calls;
46004+atomic_unchecked_t fscache_n_store_pages;
46005+atomic_unchecked_t fscache_n_store_radix_deletes;
46006+atomic_unchecked_t fscache_n_store_pages_over_limit;
46007
46008-atomic_t fscache_n_store_vmscan_not_storing;
46009-atomic_t fscache_n_store_vmscan_gone;
46010-atomic_t fscache_n_store_vmscan_busy;
46011-atomic_t fscache_n_store_vmscan_cancelled;
46012+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46013+atomic_unchecked_t fscache_n_store_vmscan_gone;
46014+atomic_unchecked_t fscache_n_store_vmscan_busy;
46015+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46016
46017-atomic_t fscache_n_marks;
46018-atomic_t fscache_n_uncaches;
46019+atomic_unchecked_t fscache_n_marks;
46020+atomic_unchecked_t fscache_n_uncaches;
46021
46022-atomic_t fscache_n_acquires;
46023-atomic_t fscache_n_acquires_null;
46024-atomic_t fscache_n_acquires_no_cache;
46025-atomic_t fscache_n_acquires_ok;
46026-atomic_t fscache_n_acquires_nobufs;
46027-atomic_t fscache_n_acquires_oom;
46028+atomic_unchecked_t fscache_n_acquires;
46029+atomic_unchecked_t fscache_n_acquires_null;
46030+atomic_unchecked_t fscache_n_acquires_no_cache;
46031+atomic_unchecked_t fscache_n_acquires_ok;
46032+atomic_unchecked_t fscache_n_acquires_nobufs;
46033+atomic_unchecked_t fscache_n_acquires_oom;
46034
46035-atomic_t fscache_n_updates;
46036-atomic_t fscache_n_updates_null;
46037-atomic_t fscache_n_updates_run;
46038+atomic_unchecked_t fscache_n_updates;
46039+atomic_unchecked_t fscache_n_updates_null;
46040+atomic_unchecked_t fscache_n_updates_run;
46041
46042-atomic_t fscache_n_relinquishes;
46043-atomic_t fscache_n_relinquishes_null;
46044-atomic_t fscache_n_relinquishes_waitcrt;
46045-atomic_t fscache_n_relinquishes_retire;
46046+atomic_unchecked_t fscache_n_relinquishes;
46047+atomic_unchecked_t fscache_n_relinquishes_null;
46048+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46049+atomic_unchecked_t fscache_n_relinquishes_retire;
46050
46051-atomic_t fscache_n_cookie_index;
46052-atomic_t fscache_n_cookie_data;
46053-atomic_t fscache_n_cookie_special;
46054+atomic_unchecked_t fscache_n_cookie_index;
46055+atomic_unchecked_t fscache_n_cookie_data;
46056+atomic_unchecked_t fscache_n_cookie_special;
46057
46058-atomic_t fscache_n_object_alloc;
46059-atomic_t fscache_n_object_no_alloc;
46060-atomic_t fscache_n_object_lookups;
46061-atomic_t fscache_n_object_lookups_negative;
46062-atomic_t fscache_n_object_lookups_positive;
46063-atomic_t fscache_n_object_lookups_timed_out;
46064-atomic_t fscache_n_object_created;
46065-atomic_t fscache_n_object_avail;
46066-atomic_t fscache_n_object_dead;
46067+atomic_unchecked_t fscache_n_object_alloc;
46068+atomic_unchecked_t fscache_n_object_no_alloc;
46069+atomic_unchecked_t fscache_n_object_lookups;
46070+atomic_unchecked_t fscache_n_object_lookups_negative;
46071+atomic_unchecked_t fscache_n_object_lookups_positive;
46072+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46073+atomic_unchecked_t fscache_n_object_created;
46074+atomic_unchecked_t fscache_n_object_avail;
46075+atomic_unchecked_t fscache_n_object_dead;
46076
46077-atomic_t fscache_n_checkaux_none;
46078-atomic_t fscache_n_checkaux_okay;
46079-atomic_t fscache_n_checkaux_update;
46080-atomic_t fscache_n_checkaux_obsolete;
46081+atomic_unchecked_t fscache_n_checkaux_none;
46082+atomic_unchecked_t fscache_n_checkaux_okay;
46083+atomic_unchecked_t fscache_n_checkaux_update;
46084+atomic_unchecked_t fscache_n_checkaux_obsolete;
46085
46086 atomic_t fscache_n_cop_alloc_object;
46087 atomic_t fscache_n_cop_lookup_object;
46088@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46089 seq_puts(m, "FS-Cache statistics\n");
46090
46091 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46092- atomic_read(&fscache_n_cookie_index),
46093- atomic_read(&fscache_n_cookie_data),
46094- atomic_read(&fscache_n_cookie_special));
46095+ atomic_read_unchecked(&fscache_n_cookie_index),
46096+ atomic_read_unchecked(&fscache_n_cookie_data),
46097+ atomic_read_unchecked(&fscache_n_cookie_special));
46098
46099 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46100- atomic_read(&fscache_n_object_alloc),
46101- atomic_read(&fscache_n_object_no_alloc),
46102- atomic_read(&fscache_n_object_avail),
46103- atomic_read(&fscache_n_object_dead));
46104+ atomic_read_unchecked(&fscache_n_object_alloc),
46105+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46106+ atomic_read_unchecked(&fscache_n_object_avail),
46107+ atomic_read_unchecked(&fscache_n_object_dead));
46108 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46109- atomic_read(&fscache_n_checkaux_none),
46110- atomic_read(&fscache_n_checkaux_okay),
46111- atomic_read(&fscache_n_checkaux_update),
46112- atomic_read(&fscache_n_checkaux_obsolete));
46113+ atomic_read_unchecked(&fscache_n_checkaux_none),
46114+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46115+ atomic_read_unchecked(&fscache_n_checkaux_update),
46116+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46117
46118 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46119- atomic_read(&fscache_n_marks),
46120- atomic_read(&fscache_n_uncaches));
46121+ atomic_read_unchecked(&fscache_n_marks),
46122+ atomic_read_unchecked(&fscache_n_uncaches));
46123
46124 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46125 " oom=%u\n",
46126- atomic_read(&fscache_n_acquires),
46127- atomic_read(&fscache_n_acquires_null),
46128- atomic_read(&fscache_n_acquires_no_cache),
46129- atomic_read(&fscache_n_acquires_ok),
46130- atomic_read(&fscache_n_acquires_nobufs),
46131- atomic_read(&fscache_n_acquires_oom));
46132+ atomic_read_unchecked(&fscache_n_acquires),
46133+ atomic_read_unchecked(&fscache_n_acquires_null),
46134+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46135+ atomic_read_unchecked(&fscache_n_acquires_ok),
46136+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46137+ atomic_read_unchecked(&fscache_n_acquires_oom));
46138
46139 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46140- atomic_read(&fscache_n_object_lookups),
46141- atomic_read(&fscache_n_object_lookups_negative),
46142- atomic_read(&fscache_n_object_lookups_positive),
46143- atomic_read(&fscache_n_object_created),
46144- atomic_read(&fscache_n_object_lookups_timed_out));
46145+ atomic_read_unchecked(&fscache_n_object_lookups),
46146+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46147+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46148+ atomic_read_unchecked(&fscache_n_object_created),
46149+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46150
46151 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46152- atomic_read(&fscache_n_updates),
46153- atomic_read(&fscache_n_updates_null),
46154- atomic_read(&fscache_n_updates_run));
46155+ atomic_read_unchecked(&fscache_n_updates),
46156+ atomic_read_unchecked(&fscache_n_updates_null),
46157+ atomic_read_unchecked(&fscache_n_updates_run));
46158
46159 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46160- atomic_read(&fscache_n_relinquishes),
46161- atomic_read(&fscache_n_relinquishes_null),
46162- atomic_read(&fscache_n_relinquishes_waitcrt),
46163- atomic_read(&fscache_n_relinquishes_retire));
46164+ atomic_read_unchecked(&fscache_n_relinquishes),
46165+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46166+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46167+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46168
46169 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46170- atomic_read(&fscache_n_attr_changed),
46171- atomic_read(&fscache_n_attr_changed_ok),
46172- atomic_read(&fscache_n_attr_changed_nobufs),
46173- atomic_read(&fscache_n_attr_changed_nomem),
46174- atomic_read(&fscache_n_attr_changed_calls));
46175+ atomic_read_unchecked(&fscache_n_attr_changed),
46176+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46177+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46178+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46179+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46180
46181 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46182- atomic_read(&fscache_n_allocs),
46183- atomic_read(&fscache_n_allocs_ok),
46184- atomic_read(&fscache_n_allocs_wait),
46185- atomic_read(&fscache_n_allocs_nobufs),
46186- atomic_read(&fscache_n_allocs_intr));
46187+ atomic_read_unchecked(&fscache_n_allocs),
46188+ atomic_read_unchecked(&fscache_n_allocs_ok),
46189+ atomic_read_unchecked(&fscache_n_allocs_wait),
46190+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46191+ atomic_read_unchecked(&fscache_n_allocs_intr));
46192 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46193- atomic_read(&fscache_n_alloc_ops),
46194- atomic_read(&fscache_n_alloc_op_waits),
46195- atomic_read(&fscache_n_allocs_object_dead));
46196+ atomic_read_unchecked(&fscache_n_alloc_ops),
46197+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46198+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46199
46200 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46201 " int=%u oom=%u\n",
46202- atomic_read(&fscache_n_retrievals),
46203- atomic_read(&fscache_n_retrievals_ok),
46204- atomic_read(&fscache_n_retrievals_wait),
46205- atomic_read(&fscache_n_retrievals_nodata),
46206- atomic_read(&fscache_n_retrievals_nobufs),
46207- atomic_read(&fscache_n_retrievals_intr),
46208- atomic_read(&fscache_n_retrievals_nomem));
46209+ atomic_read_unchecked(&fscache_n_retrievals),
46210+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46211+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46212+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46213+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46214+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46215+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46216 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46217- atomic_read(&fscache_n_retrieval_ops),
46218- atomic_read(&fscache_n_retrieval_op_waits),
46219- atomic_read(&fscache_n_retrievals_object_dead));
46220+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46221+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46222+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46223
46224 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46225- atomic_read(&fscache_n_stores),
46226- atomic_read(&fscache_n_stores_ok),
46227- atomic_read(&fscache_n_stores_again),
46228- atomic_read(&fscache_n_stores_nobufs),
46229- atomic_read(&fscache_n_stores_oom));
46230+ atomic_read_unchecked(&fscache_n_stores),
46231+ atomic_read_unchecked(&fscache_n_stores_ok),
46232+ atomic_read_unchecked(&fscache_n_stores_again),
46233+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46234+ atomic_read_unchecked(&fscache_n_stores_oom));
46235 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46236- atomic_read(&fscache_n_store_ops),
46237- atomic_read(&fscache_n_store_calls),
46238- atomic_read(&fscache_n_store_pages),
46239- atomic_read(&fscache_n_store_radix_deletes),
46240- atomic_read(&fscache_n_store_pages_over_limit));
46241+ atomic_read_unchecked(&fscache_n_store_ops),
46242+ atomic_read_unchecked(&fscache_n_store_calls),
46243+ atomic_read_unchecked(&fscache_n_store_pages),
46244+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
46245+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46246
46247 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46248- atomic_read(&fscache_n_store_vmscan_not_storing),
46249- atomic_read(&fscache_n_store_vmscan_gone),
46250- atomic_read(&fscache_n_store_vmscan_busy),
46251- atomic_read(&fscache_n_store_vmscan_cancelled));
46252+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46253+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46254+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46255+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46256
46257 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46258- atomic_read(&fscache_n_op_pend),
46259- atomic_read(&fscache_n_op_run),
46260- atomic_read(&fscache_n_op_enqueue),
46261- atomic_read(&fscache_n_op_cancelled),
46262- atomic_read(&fscache_n_op_rejected));
46263+ atomic_read_unchecked(&fscache_n_op_pend),
46264+ atomic_read_unchecked(&fscache_n_op_run),
46265+ atomic_read_unchecked(&fscache_n_op_enqueue),
46266+ atomic_read_unchecked(&fscache_n_op_cancelled),
46267+ atomic_read_unchecked(&fscache_n_op_rejected));
46268 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46269- atomic_read(&fscache_n_op_deferred_release),
46270- atomic_read(&fscache_n_op_release),
46271- atomic_read(&fscache_n_op_gc));
46272+ atomic_read_unchecked(&fscache_n_op_deferred_release),
46273+ atomic_read_unchecked(&fscache_n_op_release),
46274+ atomic_read_unchecked(&fscache_n_op_gc));
46275
46276 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46277 atomic_read(&fscache_n_cop_alloc_object),
46278diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46279index 3426521..3b75162 100644
46280--- a/fs/fuse/cuse.c
46281+++ b/fs/fuse/cuse.c
46282@@ -587,10 +587,12 @@ static int __init cuse_init(void)
46283 INIT_LIST_HEAD(&cuse_conntbl[i]);
46284
46285 /* inherit and extend fuse_dev_operations */
46286- cuse_channel_fops = fuse_dev_operations;
46287- cuse_channel_fops.owner = THIS_MODULE;
46288- cuse_channel_fops.open = cuse_channel_open;
46289- cuse_channel_fops.release = cuse_channel_release;
46290+ pax_open_kernel();
46291+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46292+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46293+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46294+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46295+ pax_close_kernel();
46296
46297 cuse_class = class_create(THIS_MODULE, "cuse");
46298 if (IS_ERR(cuse_class))
46299diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46300index 7df2b5e..5804aa7 100644
46301--- a/fs/fuse/dev.c
46302+++ b/fs/fuse/dev.c
46303@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46304 ret = 0;
46305 pipe_lock(pipe);
46306
46307- if (!pipe->readers) {
46308+ if (!atomic_read(&pipe->readers)) {
46309 send_sig(SIGPIPE, current, 0);
46310 if (!ret)
46311 ret = -EPIPE;
46312diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46313index bc43832..0cfe5a6 100644
46314--- a/fs/fuse/dir.c
46315+++ b/fs/fuse/dir.c
46316@@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
46317 return link;
46318 }
46319
46320-static void free_link(char *link)
46321+static void free_link(const char *link)
46322 {
46323 if (!IS_ERR(link))
46324 free_page((unsigned long) link);
46325diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46326index a9ba244..d9df391 100644
46327--- a/fs/gfs2/inode.c
46328+++ b/fs/gfs2/inode.c
46329@@ -1496,7 +1496,7 @@ out:
46330
46331 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46332 {
46333- char *s = nd_get_link(nd);
46334+ const char *s = nd_get_link(nd);
46335 if (!IS_ERR(s))
46336 kfree(s);
46337 }
46338diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46339index 001ef01..f7d5f07 100644
46340--- a/fs/hugetlbfs/inode.c
46341+++ b/fs/hugetlbfs/inode.c
46342@@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46343 .kill_sb = kill_litter_super,
46344 };
46345
46346-static struct vfsmount *hugetlbfs_vfsmount;
46347+struct vfsmount *hugetlbfs_vfsmount;
46348
46349 static int can_do_hugetlb_shm(void)
46350 {
46351diff --git a/fs/inode.c b/fs/inode.c
46352index 9f4f5fe..6214688 100644
46353--- a/fs/inode.c
46354+++ b/fs/inode.c
46355@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
46356
46357 #ifdef CONFIG_SMP
46358 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46359- static atomic_t shared_last_ino;
46360- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46361+ static atomic_unchecked_t shared_last_ino;
46362+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46363
46364 res = next - LAST_INO_BATCH;
46365 }
46366diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46367index 4a6cf28..d3a29d3 100644
46368--- a/fs/jffs2/erase.c
46369+++ b/fs/jffs2/erase.c
46370@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46371 struct jffs2_unknown_node marker = {
46372 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46373 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46374- .totlen = cpu_to_je32(c->cleanmarker_size)
46375+ .totlen = cpu_to_je32(c->cleanmarker_size),
46376+ .hdr_crc = cpu_to_je32(0)
46377 };
46378
46379 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46380diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46381index 74d9be1..d5dd140 100644
46382--- a/fs/jffs2/wbuf.c
46383+++ b/fs/jffs2/wbuf.c
46384@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46385 {
46386 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46387 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46388- .totlen = constant_cpu_to_je32(8)
46389+ .totlen = constant_cpu_to_je32(8),
46390+ .hdr_crc = constant_cpu_to_je32(0)
46391 };
46392
46393 /*
46394diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46395index 4a82950..bcaa0cb 100644
46396--- a/fs/jfs/super.c
46397+++ b/fs/jfs/super.c
46398@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46399
46400 jfs_inode_cachep =
46401 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46402- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46403+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46404 init_once);
46405 if (jfs_inode_cachep == NULL)
46406 return -ENOMEM;
46407diff --git a/fs/libfs.c b/fs/libfs.c
46408index 18d08f5..fe3dc64 100644
46409--- a/fs/libfs.c
46410+++ b/fs/libfs.c
46411@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46412
46413 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46414 struct dentry *next;
46415+ char d_name[sizeof(next->d_iname)];
46416+ const unsigned char *name;
46417+
46418 next = list_entry(p, struct dentry, d_u.d_child);
46419 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46420 if (!simple_positive(next)) {
46421@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46422
46423 spin_unlock(&next->d_lock);
46424 spin_unlock(&dentry->d_lock);
46425- if (filldir(dirent, next->d_name.name,
46426+ name = next->d_name.name;
46427+ if (name == next->d_iname) {
46428+ memcpy(d_name, name, next->d_name.len);
46429+ name = d_name;
46430+ }
46431+ if (filldir(dirent, name,
46432 next->d_name.len, filp->f_pos,
46433 next->d_inode->i_ino,
46434 dt_type(next->d_inode)) < 0)
46435diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46436index 8392cb8..80d6193 100644
46437--- a/fs/lockd/clntproc.c
46438+++ b/fs/lockd/clntproc.c
46439@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46440 /*
46441 * Cookie counter for NLM requests
46442 */
46443-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46444+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46445
46446 void nlmclnt_next_cookie(struct nlm_cookie *c)
46447 {
46448- u32 cookie = atomic_inc_return(&nlm_cookie);
46449+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46450
46451 memcpy(c->data, &cookie, 4);
46452 c->len=4;
46453diff --git a/fs/locks.c b/fs/locks.c
46454index 6a64f15..c3dacf2 100644
46455--- a/fs/locks.c
46456+++ b/fs/locks.c
46457@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
46458 return 0;
46459 }
46460
46461-static int assign_type(struct file_lock *fl, int type)
46462+static int assign_type(struct file_lock *fl, long type)
46463 {
46464 switch (type) {
46465 case F_RDLCK:
46466@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
46467 /*
46468 * Initialize a lease, use the default lock manager operations
46469 */
46470-static int lease_init(struct file *filp, int type, struct file_lock *fl)
46471+static int lease_init(struct file *filp, long type, struct file_lock *fl)
46472 {
46473 if (assign_type(fl, type) != 0)
46474 return -EINVAL;
46475@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
46476 }
46477
46478 /* Allocate a file_lock initialised to this type of lease */
46479-static struct file_lock *lease_alloc(struct file *filp, int type)
46480+static struct file_lock *lease_alloc(struct file *filp, long type)
46481 {
46482 struct file_lock *fl = locks_alloc_lock();
46483 int error = -ENOMEM;
46484@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46485 return;
46486
46487 if (filp->f_op && filp->f_op->flock) {
46488- struct file_lock fl = {
46489+ struct file_lock flock = {
46490 .fl_pid = current->tgid,
46491 .fl_file = filp,
46492 .fl_flags = FL_FLOCK,
46493 .fl_type = F_UNLCK,
46494 .fl_end = OFFSET_MAX,
46495 };
46496- filp->f_op->flock(filp, F_SETLKW, &fl);
46497- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46498- fl.fl_ops->fl_release_private(&fl);
46499+ filp->f_op->flock(filp, F_SETLKW, &flock);
46500+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46501+ flock.fl_ops->fl_release_private(&flock);
46502 }
46503
46504 lock_flocks();
46505diff --git a/fs/namei.c b/fs/namei.c
46506index c427919..232326c 100644
46507--- a/fs/namei.c
46508+++ b/fs/namei.c
46509@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46510 if (ret != -EACCES)
46511 return ret;
46512
46513+#ifdef CONFIG_GRKERNSEC
46514+ /* we'll block if we have to log due to a denied capability use */
46515+ if (mask & MAY_NOT_BLOCK)
46516+ return -ECHILD;
46517+#endif
46518+
46519 if (S_ISDIR(inode->i_mode)) {
46520 /* DACs are overridable for directories */
46521- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46522- return 0;
46523 if (!(mask & MAY_WRITE))
46524- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46525+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46526+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46527 return 0;
46528+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46529+ return 0;
46530 return -EACCES;
46531 }
46532 /*
46533+ * Searching includes executable on directories, else just read.
46534+ */
46535+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46536+ if (mask == MAY_READ)
46537+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46538+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46539+ return 0;
46540+
46541+ /*
46542 * Read/write DACs are always overridable.
46543 * Executable DACs are overridable when there is
46544 * at least one exec bit set.
46545@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46546 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46547 return 0;
46548
46549- /*
46550- * Searching includes executable on directories, else just read.
46551- */
46552- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46553- if (mask == MAY_READ)
46554- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46555- return 0;
46556-
46557 return -EACCES;
46558 }
46559
46560@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46561 return error;
46562 }
46563
46564+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46565+ dentry->d_inode, dentry, nd->path.mnt)) {
46566+ error = -EACCES;
46567+ *p = ERR_PTR(error); /* no ->put_link(), please */
46568+ path_put(&nd->path);
46569+ return error;
46570+ }
46571+
46572 nd->last_type = LAST_BIND;
46573 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46574 error = PTR_ERR(*p);
46575 if (!IS_ERR(*p)) {
46576- char *s = nd_get_link(nd);
46577+ const char *s = nd_get_link(nd);
46578 error = 0;
46579 if (s)
46580 error = __vfs_follow_link(nd, s);
46581@@ -1355,6 +1371,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
46582 if (!res)
46583 res = walk_component(nd, path, &nd->last,
46584 nd->last_type, LOOKUP_FOLLOW);
46585+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
46586+ res = -EACCES;
46587+ }
46588 put_link(nd, &link, cookie);
46589 } while (res > 0);
46590
46591@@ -1746,6 +1765,9 @@ static int path_lookupat(int dfd, const char *name,
46592 err = follow_link(&link, nd, &cookie);
46593 if (!err)
46594 err = lookup_last(nd, &path);
46595+ if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
46596+ err = -EACCES;
46597+ }
46598 put_link(nd, &link, cookie);
46599 }
46600 }
46601@@ -1753,6 +1775,21 @@ static int path_lookupat(int dfd, const char *name,
46602 if (!err)
46603 err = complete_walk(nd);
46604
46605+ if (!(nd->flags & LOOKUP_PARENT)) {
46606+#ifdef CONFIG_GRKERNSEC
46607+ if (flags & LOOKUP_RCU) {
46608+ if (!err)
46609+ path_put(&nd->path);
46610+ err = -ECHILD;
46611+ } else
46612+#endif
46613+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46614+ if (!err)
46615+ path_put(&nd->path);
46616+ err = -ENOENT;
46617+ }
46618+ }
46619+
46620 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46621 if (!nd->inode->i_op->lookup) {
46622 path_put(&nd->path);
46623@@ -1780,6 +1817,15 @@ static int do_path_lookup(int dfd, const char *name,
46624 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46625
46626 if (likely(!retval)) {
46627+ if (*name != '/' && nd->path.dentry && nd->inode) {
46628+#ifdef CONFIG_GRKERNSEC
46629+ if (flags & LOOKUP_RCU)
46630+ return -ECHILD;
46631+#endif
46632+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46633+ return -ENOENT;
46634+ }
46635+
46636 if (unlikely(!audit_dummy_context())) {
46637 if (nd->path.dentry && nd->inode)
46638 audit_inode(name, nd->path.dentry);
46639@@ -2126,6 +2172,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46640 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46641 return -EPERM;
46642
46643+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46644+ return -EPERM;
46645+ if (gr_handle_rawio(inode))
46646+ return -EPERM;
46647+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46648+ return -EACCES;
46649+
46650 return 0;
46651 }
46652
46653@@ -2187,6 +2240,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46654 error = complete_walk(nd);
46655 if (error)
46656 return ERR_PTR(error);
46657+#ifdef CONFIG_GRKERNSEC
46658+ if (nd->flags & LOOKUP_RCU) {
46659+ error = -ECHILD;
46660+ goto exit;
46661+ }
46662+#endif
46663+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46664+ error = -ENOENT;
46665+ goto exit;
46666+ }
46667 audit_inode(pathname, nd->path.dentry);
46668 if (open_flag & O_CREAT) {
46669 error = -EISDIR;
46670@@ -2197,6 +2260,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46671 error = complete_walk(nd);
46672 if (error)
46673 return ERR_PTR(error);
46674+#ifdef CONFIG_GRKERNSEC
46675+ if (nd->flags & LOOKUP_RCU) {
46676+ error = -ECHILD;
46677+ goto exit;
46678+ }
46679+#endif
46680+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46681+ error = -ENOENT;
46682+ goto exit;
46683+ }
46684 audit_inode(pathname, dir);
46685 goto ok;
46686 }
46687@@ -2218,6 +2291,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46688 error = complete_walk(nd);
46689 if (error)
46690 return ERR_PTR(error);
46691+#ifdef CONFIG_GRKERNSEC
46692+ if (nd->flags & LOOKUP_RCU) {
46693+ error = -ECHILD;
46694+ goto exit;
46695+ }
46696+#endif
46697+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46698+ error = -ENOENT;
46699+ goto exit;
46700+ }
46701
46702 error = -ENOTDIR;
46703 if (nd->flags & LOOKUP_DIRECTORY) {
46704@@ -2258,6 +2341,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46705 /* Negative dentry, just create the file */
46706 if (!dentry->d_inode) {
46707 umode_t mode = op->mode;
46708+
46709+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46710+ error = -EACCES;
46711+ goto exit_mutex_unlock;
46712+ }
46713+
46714 if (!IS_POSIXACL(dir->d_inode))
46715 mode &= ~current_umask();
46716 /*
46717@@ -2281,6 +2370,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46718 error = vfs_create(dir->d_inode, dentry, mode, nd);
46719 if (error)
46720 goto exit_mutex_unlock;
46721+ else
46722+ gr_handle_create(path->dentry, path->mnt);
46723 mutex_unlock(&dir->d_inode->i_mutex);
46724 dput(nd->path.dentry);
46725 nd->path.dentry = dentry;
46726@@ -2290,6 +2381,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46727 /*
46728 * It already exists.
46729 */
46730+
46731+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46732+ error = -ENOENT;
46733+ goto exit_mutex_unlock;
46734+ }
46735+
46736+ /* only check if O_CREAT is specified, all other checks need to go
46737+ into may_open */
46738+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46739+ error = -EACCES;
46740+ goto exit_mutex_unlock;
46741+ }
46742+
46743 mutex_unlock(&dir->d_inode->i_mutex);
46744 audit_inode(pathname, path->dentry);
46745
46746@@ -2407,8 +2511,14 @@ static struct file *path_openat(int dfd, const char *pathname,
46747 error = follow_link(&link, nd, &cookie);
46748 if (unlikely(error))
46749 filp = ERR_PTR(error);
46750- else
46751+ else {
46752 filp = do_last(nd, &path, op, pathname);
46753+ if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
46754+ if (filp)
46755+ fput(filp);
46756+ filp = ERR_PTR(-EACCES);
46757+ }
46758+ }
46759 put_link(nd, &link, cookie);
46760 }
46761 out:
46762@@ -2502,6 +2612,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46763 *path = nd.path;
46764 return dentry;
46765 eexist:
46766+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46767+ dput(dentry);
46768+ dentry = ERR_PTR(-ENOENT);
46769+ goto fail;
46770+ }
46771 dput(dentry);
46772 dentry = ERR_PTR(-EEXIST);
46773 fail:
46774@@ -2524,6 +2639,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46775 }
46776 EXPORT_SYMBOL(user_path_create);
46777
46778+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46779+{
46780+ char *tmp = getname(pathname);
46781+ struct dentry *res;
46782+ if (IS_ERR(tmp))
46783+ return ERR_CAST(tmp);
46784+ res = kern_path_create(dfd, tmp, path, is_dir);
46785+ if (IS_ERR(res))
46786+ putname(tmp);
46787+ else
46788+ *to = tmp;
46789+ return res;
46790+}
46791+
46792 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46793 {
46794 int error = may_create(dir, dentry);
46795@@ -2591,6 +2720,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46796 error = mnt_want_write(path.mnt);
46797 if (error)
46798 goto out_dput;
46799+
46800+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46801+ error = -EPERM;
46802+ goto out_drop_write;
46803+ }
46804+
46805+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46806+ error = -EACCES;
46807+ goto out_drop_write;
46808+ }
46809+
46810 error = security_path_mknod(&path, dentry, mode, dev);
46811 if (error)
46812 goto out_drop_write;
46813@@ -2608,6 +2748,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46814 }
46815 out_drop_write:
46816 mnt_drop_write(path.mnt);
46817+
46818+ if (!error)
46819+ gr_handle_create(dentry, path.mnt);
46820 out_dput:
46821 dput(dentry);
46822 mutex_unlock(&path.dentry->d_inode->i_mutex);
46823@@ -2661,12 +2804,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46824 error = mnt_want_write(path.mnt);
46825 if (error)
46826 goto out_dput;
46827+
46828+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46829+ error = -EACCES;
46830+ goto out_drop_write;
46831+ }
46832+
46833 error = security_path_mkdir(&path, dentry, mode);
46834 if (error)
46835 goto out_drop_write;
46836 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46837 out_drop_write:
46838 mnt_drop_write(path.mnt);
46839+
46840+ if (!error)
46841+ gr_handle_create(dentry, path.mnt);
46842 out_dput:
46843 dput(dentry);
46844 mutex_unlock(&path.dentry->d_inode->i_mutex);
46845@@ -2746,6 +2898,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46846 char * name;
46847 struct dentry *dentry;
46848 struct nameidata nd;
46849+ ino_t saved_ino = 0;
46850+ dev_t saved_dev = 0;
46851
46852 error = user_path_parent(dfd, pathname, &nd, &name);
46853 if (error)
46854@@ -2774,6 +2928,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46855 error = -ENOENT;
46856 goto exit3;
46857 }
46858+
46859+ saved_ino = dentry->d_inode->i_ino;
46860+ saved_dev = gr_get_dev_from_dentry(dentry);
46861+
46862+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46863+ error = -EACCES;
46864+ goto exit3;
46865+ }
46866+
46867 error = mnt_want_write(nd.path.mnt);
46868 if (error)
46869 goto exit3;
46870@@ -2781,6 +2944,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46871 if (error)
46872 goto exit4;
46873 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46874+ if (!error && (saved_dev || saved_ino))
46875+ gr_handle_delete(saved_ino, saved_dev);
46876 exit4:
46877 mnt_drop_write(nd.path.mnt);
46878 exit3:
46879@@ -2843,6 +3008,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46880 struct dentry *dentry;
46881 struct nameidata nd;
46882 struct inode *inode = NULL;
46883+ ino_t saved_ino = 0;
46884+ dev_t saved_dev = 0;
46885
46886 error = user_path_parent(dfd, pathname, &nd, &name);
46887 if (error)
46888@@ -2865,6 +3032,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46889 if (!inode)
46890 goto slashes;
46891 ihold(inode);
46892+
46893+ if (inode->i_nlink <= 1) {
46894+ saved_ino = inode->i_ino;
46895+ saved_dev = gr_get_dev_from_dentry(dentry);
46896+ }
46897+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46898+ error = -EACCES;
46899+ goto exit2;
46900+ }
46901+
46902 error = mnt_want_write(nd.path.mnt);
46903 if (error)
46904 goto exit2;
46905@@ -2872,6 +3049,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46906 if (error)
46907 goto exit3;
46908 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46909+ if (!error && (saved_ino || saved_dev))
46910+ gr_handle_delete(saved_ino, saved_dev);
46911 exit3:
46912 mnt_drop_write(nd.path.mnt);
46913 exit2:
46914@@ -2947,10 +3126,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46915 error = mnt_want_write(path.mnt);
46916 if (error)
46917 goto out_dput;
46918+
46919+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46920+ error = -EACCES;
46921+ goto out_drop_write;
46922+ }
46923+
46924 error = security_path_symlink(&path, dentry, from);
46925 if (error)
46926 goto out_drop_write;
46927 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46928+ if (!error)
46929+ gr_handle_create(dentry, path.mnt);
46930 out_drop_write:
46931 mnt_drop_write(path.mnt);
46932 out_dput:
46933@@ -3025,6 +3212,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46934 {
46935 struct dentry *new_dentry;
46936 struct path old_path, new_path;
46937+ char *to = NULL;
46938 int how = 0;
46939 int error;
46940
46941@@ -3048,7 +3236,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46942 if (error)
46943 return error;
46944
46945- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46946+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46947 error = PTR_ERR(new_dentry);
46948 if (IS_ERR(new_dentry))
46949 goto out;
46950@@ -3059,13 +3247,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46951 error = mnt_want_write(new_path.mnt);
46952 if (error)
46953 goto out_dput;
46954+
46955+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46956+ old_path.dentry->d_inode,
46957+ old_path.dentry->d_inode->i_mode, to)) {
46958+ error = -EACCES;
46959+ goto out_drop_write;
46960+ }
46961+
46962+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46963+ old_path.dentry, old_path.mnt, to)) {
46964+ error = -EACCES;
46965+ goto out_drop_write;
46966+ }
46967+
46968 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46969 if (error)
46970 goto out_drop_write;
46971 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46972+ if (!error)
46973+ gr_handle_create(new_dentry, new_path.mnt);
46974 out_drop_write:
46975 mnt_drop_write(new_path.mnt);
46976 out_dput:
46977+ putname(to);
46978 dput(new_dentry);
46979 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46980 path_put(&new_path);
46981@@ -3299,6 +3504,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46982 if (new_dentry == trap)
46983 goto exit5;
46984
46985+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46986+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
46987+ to);
46988+ if (error)
46989+ goto exit5;
46990+
46991 error = mnt_want_write(oldnd.path.mnt);
46992 if (error)
46993 goto exit5;
46994@@ -3308,6 +3519,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46995 goto exit6;
46996 error = vfs_rename(old_dir->d_inode, old_dentry,
46997 new_dir->d_inode, new_dentry);
46998+ if (!error)
46999+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47000+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47001 exit6:
47002 mnt_drop_write(oldnd.path.mnt);
47003 exit5:
47004@@ -3333,6 +3547,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47005
47006 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47007 {
47008+ char tmpbuf[64];
47009+ const char *newlink;
47010 int len;
47011
47012 len = PTR_ERR(link);
47013@@ -3342,7 +3558,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47014 len = strlen(link);
47015 if (len > (unsigned) buflen)
47016 len = buflen;
47017- if (copy_to_user(buffer, link, len))
47018+
47019+ if (len < sizeof(tmpbuf)) {
47020+ memcpy(tmpbuf, link, len);
47021+ newlink = tmpbuf;
47022+ } else
47023+ newlink = link;
47024+
47025+ if (copy_to_user(buffer, newlink, len))
47026 len = -EFAULT;
47027 out:
47028 return len;
47029diff --git a/fs/namespace.c b/fs/namespace.c
47030index 4e46539..b28253c 100644
47031--- a/fs/namespace.c
47032+++ b/fs/namespace.c
47033@@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
47034 if (!(sb->s_flags & MS_RDONLY))
47035 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47036 up_write(&sb->s_umount);
47037+
47038+ gr_log_remount(mnt->mnt_devname, retval);
47039+
47040 return retval;
47041 }
47042
47043@@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
47044 br_write_unlock(vfsmount_lock);
47045 up_write(&namespace_sem);
47046 release_mounts(&umount_list);
47047+
47048+ gr_log_unmount(mnt->mnt_devname, retval);
47049+
47050 return retval;
47051 }
47052
47053@@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47054 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47055 MS_STRICTATIME);
47056
47057+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47058+ retval = -EPERM;
47059+ goto dput_out;
47060+ }
47061+
47062+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47063+ retval = -EPERM;
47064+ goto dput_out;
47065+ }
47066+
47067 if (flags & MS_REMOUNT)
47068 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47069 data_page);
47070@@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47071 dev_name, data_page);
47072 dput_out:
47073 path_put(&path);
47074+
47075+ gr_log_mount(dev_name, dir_name, retval);
47076+
47077 return retval;
47078 }
47079
47080@@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47081 if (error)
47082 goto out2;
47083
47084+ if (gr_handle_chroot_pivot()) {
47085+ error = -EPERM;
47086+ goto out2;
47087+ }
47088+
47089 get_fs_root(current->fs, &root);
47090 error = lock_mount(&old);
47091 if (error)
47092diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47093index e8bbfa5..864f936 100644
47094--- a/fs/nfs/inode.c
47095+++ b/fs/nfs/inode.c
47096@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47097 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47098 nfsi->attrtimeo_timestamp = jiffies;
47099
47100- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47101+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47102 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47103 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47104 else
47105@@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47106 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47107 }
47108
47109-static atomic_long_t nfs_attr_generation_counter;
47110+static atomic_long_unchecked_t nfs_attr_generation_counter;
47111
47112 static unsigned long nfs_read_attr_generation_counter(void)
47113 {
47114- return atomic_long_read(&nfs_attr_generation_counter);
47115+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47116 }
47117
47118 unsigned long nfs_inc_attr_generation_counter(void)
47119 {
47120- return atomic_long_inc_return(&nfs_attr_generation_counter);
47121+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47122 }
47123
47124 void nfs_fattr_init(struct nfs_fattr *fattr)
47125diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47126index 5686661..80a9a3a 100644
47127--- a/fs/nfsd/vfs.c
47128+++ b/fs/nfsd/vfs.c
47129@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47130 } else {
47131 oldfs = get_fs();
47132 set_fs(KERNEL_DS);
47133- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47134+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47135 set_fs(oldfs);
47136 }
47137
47138@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47139
47140 /* Write the data. */
47141 oldfs = get_fs(); set_fs(KERNEL_DS);
47142- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47143+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47144 set_fs(oldfs);
47145 if (host_err < 0)
47146 goto out_nfserr;
47147@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47148 */
47149
47150 oldfs = get_fs(); set_fs(KERNEL_DS);
47151- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
47152+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
47153 set_fs(oldfs);
47154
47155 if (host_err < 0)
47156diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47157index 3568c8a..e0240d8 100644
47158--- a/fs/notify/fanotify/fanotify_user.c
47159+++ b/fs/notify/fanotify/fanotify_user.c
47160@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47161 goto out_close_fd;
47162
47163 ret = -EFAULT;
47164- if (copy_to_user(buf, &fanotify_event_metadata,
47165+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47166+ copy_to_user(buf, &fanotify_event_metadata,
47167 fanotify_event_metadata.event_len))
47168 goto out_kill_access_response;
47169
47170diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47171index c887b13..0fdf472 100644
47172--- a/fs/notify/notification.c
47173+++ b/fs/notify/notification.c
47174@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47175 * get set to 0 so it will never get 'freed'
47176 */
47177 static struct fsnotify_event *q_overflow_event;
47178-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47179+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47180
47181 /**
47182 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47183@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47184 */
47185 u32 fsnotify_get_cookie(void)
47186 {
47187- return atomic_inc_return(&fsnotify_sync_cookie);
47188+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47189 }
47190 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47191
47192diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47193index 99e3610..02c1068 100644
47194--- a/fs/ntfs/dir.c
47195+++ b/fs/ntfs/dir.c
47196@@ -1329,7 +1329,7 @@ find_next_index_buffer:
47197 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47198 ~(s64)(ndir->itype.index.block_size - 1)));
47199 /* Bounds checks. */
47200- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47201+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47202 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47203 "inode 0x%lx or driver bug.", vdir->i_ino);
47204 goto err_out;
47205diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47206index 8639169..76697aa 100644
47207--- a/fs/ntfs/file.c
47208+++ b/fs/ntfs/file.c
47209@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47210 #endif /* NTFS_RW */
47211 };
47212
47213-const struct file_operations ntfs_empty_file_ops = {};
47214+const struct file_operations ntfs_empty_file_ops __read_only;
47215
47216-const struct inode_operations ntfs_empty_inode_ops = {};
47217+const struct inode_operations ntfs_empty_inode_ops __read_only;
47218diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47219index 210c352..a174f83 100644
47220--- a/fs/ocfs2/localalloc.c
47221+++ b/fs/ocfs2/localalloc.c
47222@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47223 goto bail;
47224 }
47225
47226- atomic_inc(&osb->alloc_stats.moves);
47227+ atomic_inc_unchecked(&osb->alloc_stats.moves);
47228
47229 bail:
47230 if (handle)
47231diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47232index d355e6e..578d905 100644
47233--- a/fs/ocfs2/ocfs2.h
47234+++ b/fs/ocfs2/ocfs2.h
47235@@ -235,11 +235,11 @@ enum ocfs2_vol_state
47236
47237 struct ocfs2_alloc_stats
47238 {
47239- atomic_t moves;
47240- atomic_t local_data;
47241- atomic_t bitmap_data;
47242- atomic_t bg_allocs;
47243- atomic_t bg_extends;
47244+ atomic_unchecked_t moves;
47245+ atomic_unchecked_t local_data;
47246+ atomic_unchecked_t bitmap_data;
47247+ atomic_unchecked_t bg_allocs;
47248+ atomic_unchecked_t bg_extends;
47249 };
47250
47251 enum ocfs2_local_alloc_state
47252diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47253index f169da4..9112253 100644
47254--- a/fs/ocfs2/suballoc.c
47255+++ b/fs/ocfs2/suballoc.c
47256@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47257 mlog_errno(status);
47258 goto bail;
47259 }
47260- atomic_inc(&osb->alloc_stats.bg_extends);
47261+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47262
47263 /* You should never ask for this much metadata */
47264 BUG_ON(bits_wanted >
47265@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47266 mlog_errno(status);
47267 goto bail;
47268 }
47269- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47270+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47271
47272 *suballoc_loc = res.sr_bg_blkno;
47273 *suballoc_bit_start = res.sr_bit_offset;
47274@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47275 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47276 res->sr_bits);
47277
47278- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47279+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47280
47281 BUG_ON(res->sr_bits != 1);
47282
47283@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47284 mlog_errno(status);
47285 goto bail;
47286 }
47287- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47288+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47289
47290 BUG_ON(res.sr_bits != 1);
47291
47292@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47293 cluster_start,
47294 num_clusters);
47295 if (!status)
47296- atomic_inc(&osb->alloc_stats.local_data);
47297+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
47298 } else {
47299 if (min_clusters > (osb->bitmap_cpg - 1)) {
47300 /* The only paths asking for contiguousness
47301@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47302 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47303 res.sr_bg_blkno,
47304 res.sr_bit_offset);
47305- atomic_inc(&osb->alloc_stats.bitmap_data);
47306+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47307 *num_clusters = res.sr_bits;
47308 }
47309 }
47310diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47311index 68f4541..89cfe6a 100644
47312--- a/fs/ocfs2/super.c
47313+++ b/fs/ocfs2/super.c
47314@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47315 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47316 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47317 "Stats",
47318- atomic_read(&osb->alloc_stats.bitmap_data),
47319- atomic_read(&osb->alloc_stats.local_data),
47320- atomic_read(&osb->alloc_stats.bg_allocs),
47321- atomic_read(&osb->alloc_stats.moves),
47322- atomic_read(&osb->alloc_stats.bg_extends));
47323+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47324+ atomic_read_unchecked(&osb->alloc_stats.local_data),
47325+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47326+ atomic_read_unchecked(&osb->alloc_stats.moves),
47327+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47328
47329 out += snprintf(buf + out, len - out,
47330 "%10s => State: %u Descriptor: %llu Size: %u bits "
47331@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47332 spin_lock_init(&osb->osb_xattr_lock);
47333 ocfs2_init_steal_slots(osb);
47334
47335- atomic_set(&osb->alloc_stats.moves, 0);
47336- atomic_set(&osb->alloc_stats.local_data, 0);
47337- atomic_set(&osb->alloc_stats.bitmap_data, 0);
47338- atomic_set(&osb->alloc_stats.bg_allocs, 0);
47339- atomic_set(&osb->alloc_stats.bg_extends, 0);
47340+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47341+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47342+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47343+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47344+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47345
47346 /* Copy the blockcheck stats from the superblock probe */
47347 osb->osb_ecc_stats = *stats;
47348diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47349index 5d22872..523db20 100644
47350--- a/fs/ocfs2/symlink.c
47351+++ b/fs/ocfs2/symlink.c
47352@@ -142,7 +142,7 @@ bail:
47353
47354 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47355 {
47356- char *link = nd_get_link(nd);
47357+ const char *link = nd_get_link(nd);
47358 if (!IS_ERR(link))
47359 kfree(link);
47360 }
47361diff --git a/fs/open.c b/fs/open.c
47362index 3f1108b..822d7f7 100644
47363--- a/fs/open.c
47364+++ b/fs/open.c
47365@@ -31,6 +31,8 @@
47366 #include <linux/ima.h>
47367 #include <linux/dnotify.h>
47368
47369+#define CREATE_TRACE_POINTS
47370+#include <trace/events/fs.h>
47371 #include "internal.h"
47372
47373 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
47374@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47375 error = locks_verify_truncate(inode, NULL, length);
47376 if (!error)
47377 error = security_path_truncate(&path);
47378+
47379+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47380+ error = -EACCES;
47381+
47382 if (!error)
47383 error = do_truncate(path.dentry, length, 0, NULL);
47384
47385@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47386 if (__mnt_is_readonly(path.mnt))
47387 res = -EROFS;
47388
47389+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47390+ res = -EACCES;
47391+
47392 out_path_release:
47393 path_put(&path);
47394 out:
47395@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47396 if (error)
47397 goto dput_and_out;
47398
47399+ gr_log_chdir(path.dentry, path.mnt);
47400+
47401 set_fs_pwd(current->fs, &path);
47402
47403 dput_and_out:
47404@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47405 goto out_putf;
47406
47407 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47408+
47409+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47410+ error = -EPERM;
47411+
47412+ if (!error)
47413+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47414+
47415 if (!error)
47416 set_fs_pwd(current->fs, &file->f_path);
47417 out_putf:
47418@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47419 if (error)
47420 goto dput_and_out;
47421
47422+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47423+ goto dput_and_out;
47424+
47425 set_fs_root(current->fs, &path);
47426+
47427+ gr_handle_chroot_chdir(&path);
47428+
47429 error = 0;
47430 dput_and_out:
47431 path_put(&path);
47432@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
47433 if (error)
47434 return error;
47435 mutex_lock(&inode->i_mutex);
47436+
47437+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47438+ error = -EACCES;
47439+ goto out_unlock;
47440+ }
47441+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47442+ error = -EACCES;
47443+ goto out_unlock;
47444+ }
47445+
47446 error = security_path_chmod(path, mode);
47447 if (error)
47448 goto out_unlock;
47449@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47450 int error;
47451 struct iattr newattrs;
47452
47453+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
47454+ return -EACCES;
47455+
47456 newattrs.ia_valid = ATTR_CTIME;
47457 if (user != (uid_t) -1) {
47458 newattrs.ia_valid |= ATTR_UID;
47459@@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47460 } else {
47461 fsnotify_open(f);
47462 fd_install(fd, f);
47463+ trace_do_sys_open(tmp, flags, mode);
47464 }
47465 }
47466 putname(tmp);
47467diff --git a/fs/pipe.c b/fs/pipe.c
47468index fec5e4a..f4210f9 100644
47469--- a/fs/pipe.c
47470+++ b/fs/pipe.c
47471@@ -438,9 +438,9 @@ redo:
47472 }
47473 if (bufs) /* More to do? */
47474 continue;
47475- if (!pipe->writers)
47476+ if (!atomic_read(&pipe->writers))
47477 break;
47478- if (!pipe->waiting_writers) {
47479+ if (!atomic_read(&pipe->waiting_writers)) {
47480 /* syscall merging: Usually we must not sleep
47481 * if O_NONBLOCK is set, or if we got some data.
47482 * But if a writer sleeps in kernel space, then
47483@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47484 mutex_lock(&inode->i_mutex);
47485 pipe = inode->i_pipe;
47486
47487- if (!pipe->readers) {
47488+ if (!atomic_read(&pipe->readers)) {
47489 send_sig(SIGPIPE, current, 0);
47490 ret = -EPIPE;
47491 goto out;
47492@@ -553,7 +553,7 @@ redo1:
47493 for (;;) {
47494 int bufs;
47495
47496- if (!pipe->readers) {
47497+ if (!atomic_read(&pipe->readers)) {
47498 send_sig(SIGPIPE, current, 0);
47499 if (!ret)
47500 ret = -EPIPE;
47501@@ -644,9 +644,9 @@ redo2:
47502 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47503 do_wakeup = 0;
47504 }
47505- pipe->waiting_writers++;
47506+ atomic_inc(&pipe->waiting_writers);
47507 pipe_wait(pipe);
47508- pipe->waiting_writers--;
47509+ atomic_dec(&pipe->waiting_writers);
47510 }
47511 out:
47512 mutex_unlock(&inode->i_mutex);
47513@@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47514 mask = 0;
47515 if (filp->f_mode & FMODE_READ) {
47516 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47517- if (!pipe->writers && filp->f_version != pipe->w_counter)
47518+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47519 mask |= POLLHUP;
47520 }
47521
47522@@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47523 * Most Unices do not set POLLERR for FIFOs but on Linux they
47524 * behave exactly like pipes for poll().
47525 */
47526- if (!pipe->readers)
47527+ if (!atomic_read(&pipe->readers))
47528 mask |= POLLERR;
47529 }
47530
47531@@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47532
47533 mutex_lock(&inode->i_mutex);
47534 pipe = inode->i_pipe;
47535- pipe->readers -= decr;
47536- pipe->writers -= decw;
47537+ atomic_sub(decr, &pipe->readers);
47538+ atomic_sub(decw, &pipe->writers);
47539
47540- if (!pipe->readers && !pipe->writers) {
47541+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47542 free_pipe_info(inode);
47543 } else {
47544 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47545@@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47546
47547 if (inode->i_pipe) {
47548 ret = 0;
47549- inode->i_pipe->readers++;
47550+ atomic_inc(&inode->i_pipe->readers);
47551 }
47552
47553 mutex_unlock(&inode->i_mutex);
47554@@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47555
47556 if (inode->i_pipe) {
47557 ret = 0;
47558- inode->i_pipe->writers++;
47559+ atomic_inc(&inode->i_pipe->writers);
47560 }
47561
47562 mutex_unlock(&inode->i_mutex);
47563@@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47564 if (inode->i_pipe) {
47565 ret = 0;
47566 if (filp->f_mode & FMODE_READ)
47567- inode->i_pipe->readers++;
47568+ atomic_inc(&inode->i_pipe->readers);
47569 if (filp->f_mode & FMODE_WRITE)
47570- inode->i_pipe->writers++;
47571+ atomic_inc(&inode->i_pipe->writers);
47572 }
47573
47574 mutex_unlock(&inode->i_mutex);
47575@@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47576 inode->i_pipe = NULL;
47577 }
47578
47579-static struct vfsmount *pipe_mnt __read_mostly;
47580+struct vfsmount *pipe_mnt __read_mostly;
47581
47582 /*
47583 * pipefs_dname() is called from d_path().
47584@@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47585 goto fail_iput;
47586 inode->i_pipe = pipe;
47587
47588- pipe->readers = pipe->writers = 1;
47589+ atomic_set(&pipe->readers, 1);
47590+ atomic_set(&pipe->writers, 1);
47591 inode->i_fop = &rdwr_pipefifo_fops;
47592
47593 /*
47594diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47595index 15af622..0e9f4467 100644
47596--- a/fs/proc/Kconfig
47597+++ b/fs/proc/Kconfig
47598@@ -30,12 +30,12 @@ config PROC_FS
47599
47600 config PROC_KCORE
47601 bool "/proc/kcore support" if !ARM
47602- depends on PROC_FS && MMU
47603+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47604
47605 config PROC_VMCORE
47606 bool "/proc/vmcore support"
47607- depends on PROC_FS && CRASH_DUMP
47608- default y
47609+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47610+ default n
47611 help
47612 Exports the dump image of crashed kernel in ELF format.
47613
47614@@ -59,8 +59,8 @@ config PROC_SYSCTL
47615 limited in memory.
47616
47617 config PROC_PAGE_MONITOR
47618- default y
47619- depends on PROC_FS && MMU
47620+ default n
47621+ depends on PROC_FS && MMU && !GRKERNSEC
47622 bool "Enable /proc page monitoring" if EXPERT
47623 help
47624 Various /proc files exist to monitor process memory utilization:
47625diff --git a/fs/proc/array.c b/fs/proc/array.c
47626index f9bd395..acb7847 100644
47627--- a/fs/proc/array.c
47628+++ b/fs/proc/array.c
47629@@ -60,6 +60,7 @@
47630 #include <linux/tty.h>
47631 #include <linux/string.h>
47632 #include <linux/mman.h>
47633+#include <linux/grsecurity.h>
47634 #include <linux/proc_fs.h>
47635 #include <linux/ioport.h>
47636 #include <linux/uaccess.h>
47637@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47638 seq_putc(m, '\n');
47639 }
47640
47641+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47642+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47643+{
47644+ if (p->mm)
47645+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47646+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47647+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47648+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47649+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47650+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47651+ else
47652+ seq_printf(m, "PaX:\t-----\n");
47653+}
47654+#endif
47655+
47656 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47657 struct pid *pid, struct task_struct *task)
47658 {
47659@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47660 task_cpus_allowed(m, task);
47661 cpuset_task_status_allowed(m, task);
47662 task_context_switch_counts(m, task);
47663+
47664+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47665+ task_pax(m, task);
47666+#endif
47667+
47668+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47669+ task_grsec_rbac(m, task);
47670+#endif
47671+
47672 return 0;
47673 }
47674
47675+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47676+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47677+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47678+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47679+#endif
47680+
47681 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47682 struct pid *pid, struct task_struct *task, int whole)
47683 {
47684@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47685 char tcomm[sizeof(task->comm)];
47686 unsigned long flags;
47687
47688+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47689+ if (current->exec_id != m->exec_id) {
47690+ gr_log_badprocpid("stat");
47691+ return 0;
47692+ }
47693+#endif
47694+
47695 state = *get_task_state(task);
47696 vsize = eip = esp = 0;
47697 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47698@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47699 gtime = task->gtime;
47700 }
47701
47702+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47703+ if (PAX_RAND_FLAGS(mm)) {
47704+ eip = 0;
47705+ esp = 0;
47706+ wchan = 0;
47707+ }
47708+#endif
47709+#ifdef CONFIG_GRKERNSEC_HIDESYM
47710+ wchan = 0;
47711+ eip =0;
47712+ esp =0;
47713+#endif
47714+
47715 /* scale priority and nice values from timeslices to -20..20 */
47716 /* to make it look like a "normal" Unix priority/nice value */
47717 priority = task_prio(task);
47718@@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47719 seq_put_decimal_ull(m, ' ', vsize);
47720 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47721 seq_put_decimal_ull(m, ' ', rsslim);
47722+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47723+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47724+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47725+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47726+#else
47727 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47728 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47729 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47730+#endif
47731 seq_put_decimal_ull(m, ' ', esp);
47732 seq_put_decimal_ull(m, ' ', eip);
47733 /* The signal information here is obsolete.
47734@@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47735 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47736 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47737 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47738+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47739+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47740+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47741+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47742+#else
47743 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47744 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47745 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47746+#endif
47747 seq_putc(m, '\n');
47748 if (mm)
47749 mmput(mm);
47750@@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47751 struct pid *pid, struct task_struct *task)
47752 {
47753 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47754- struct mm_struct *mm = get_task_mm(task);
47755+ struct mm_struct *mm;
47756
47757+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47758+ if (current->exec_id != m->exec_id) {
47759+ gr_log_badprocpid("statm");
47760+ return 0;
47761+ }
47762+#endif
47763+ mm = get_task_mm(task);
47764 if (mm) {
47765 size = task_statm(mm, &shared, &text, &data, &resident);
47766 mmput(mm);
47767@@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47768
47769 return 0;
47770 }
47771+
47772+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47773+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47774+{
47775+ u32 curr_ip = 0;
47776+ unsigned long flags;
47777+
47778+ if (lock_task_sighand(task, &flags)) {
47779+ curr_ip = task->signal->curr_ip;
47780+ unlock_task_sighand(task, &flags);
47781+ }
47782+
47783+ return sprintf(buffer, "%pI4\n", &curr_ip);
47784+}
47785+#endif
47786diff --git a/fs/proc/base.c b/fs/proc/base.c
47787index 9fc77b4..4877d08 100644
47788--- a/fs/proc/base.c
47789+++ b/fs/proc/base.c
47790@@ -109,6 +109,14 @@ struct pid_entry {
47791 union proc_op op;
47792 };
47793
47794+struct getdents_callback {
47795+ struct linux_dirent __user * current_dir;
47796+ struct linux_dirent __user * previous;
47797+ struct file * file;
47798+ int count;
47799+ int error;
47800+};
47801+
47802 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47803 .name = (NAME), \
47804 .len = sizeof(NAME) - 1, \
47805@@ -198,11 +206,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
47806 return result;
47807 }
47808
47809-struct mm_struct *mm_for_maps(struct task_struct *task)
47810-{
47811- return mm_access(task, PTRACE_MODE_READ);
47812-}
47813-
47814 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47815 {
47816 int res = 0;
47817@@ -213,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47818 if (!mm->arg_end)
47819 goto out_mm; /* Shh! No looking before we're done */
47820
47821+ if (gr_acl_handle_procpidmem(task))
47822+ goto out_mm;
47823+
47824 len = mm->arg_end - mm->arg_start;
47825
47826 if (len > PAGE_SIZE)
47827@@ -240,12 +246,28 @@ out:
47828 return res;
47829 }
47830
47831+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47832+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47833+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47834+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47835+#endif
47836+
47837 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47838 {
47839- struct mm_struct *mm = mm_for_maps(task);
47840+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
47841 int res = PTR_ERR(mm);
47842 if (mm && !IS_ERR(mm)) {
47843 unsigned int nwords = 0;
47844+
47845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47846+ /* allow if we're currently ptracing this task */
47847+ if (PAX_RAND_FLAGS(mm) &&
47848+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47849+ mmput(mm);
47850+ return 0;
47851+ }
47852+#endif
47853+
47854 do {
47855 nwords += 2;
47856 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47857@@ -259,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47858 }
47859
47860
47861-#ifdef CONFIG_KALLSYMS
47862+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47863 /*
47864 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47865 * Returns the resolved symbol. If that fails, simply return the address.
47866@@ -298,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
47867 mutex_unlock(&task->signal->cred_guard_mutex);
47868 }
47869
47870-#ifdef CONFIG_STACKTRACE
47871+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47872
47873 #define MAX_STACK_TRACE_DEPTH 64
47874
47875@@ -489,7 +511,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47876 return count;
47877 }
47878
47879-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47880+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47881 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47882 {
47883 long nr;
47884@@ -518,7 +540,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47885 /************************************************************************/
47886
47887 /* permission checks */
47888-static int proc_fd_access_allowed(struct inode *inode)
47889+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47890 {
47891 struct task_struct *task;
47892 int allowed = 0;
47893@@ -528,7 +550,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47894 */
47895 task = get_proc_task(inode);
47896 if (task) {
47897- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47898+ if (log)
47899+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47900+ else
47901+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47902 put_task_struct(task);
47903 }
47904 return allowed;
47905@@ -566,10 +591,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47906 struct task_struct *task,
47907 int hide_pid_min)
47908 {
47909+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47910+ return false;
47911+
47912+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47913+ rcu_read_lock();
47914+ {
47915+ const struct cred *tmpcred = current_cred();
47916+ const struct cred *cred = __task_cred(task);
47917+
47918+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47919+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47920+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47921+#endif
47922+ ) {
47923+ rcu_read_unlock();
47924+ return true;
47925+ }
47926+ }
47927+ rcu_read_unlock();
47928+
47929+ if (!pid->hide_pid)
47930+ return false;
47931+#endif
47932+
47933 if (pid->hide_pid < hide_pid_min)
47934 return true;
47935 if (in_group_p(pid->pid_gid))
47936 return true;
47937+
47938 return ptrace_may_access(task, PTRACE_MODE_READ);
47939 }
47940
47941@@ -587,7 +637,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47942 put_task_struct(task);
47943
47944 if (!has_perms) {
47945+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47946+ {
47947+#else
47948 if (pid->hide_pid == 2) {
47949+#endif
47950 /*
47951 * Let's make getdents(), stat(), and open()
47952 * consistent with each other. If a process
47953@@ -677,7 +731,7 @@ static const struct file_operations proc_single_file_operations = {
47954 .release = single_release,
47955 };
47956
47957-static int mem_open(struct inode* inode, struct file* file)
47958+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
47959 {
47960 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
47961 struct mm_struct *mm;
47962@@ -685,7 +739,12 @@ static int mem_open(struct inode* inode, struct file* file)
47963 if (!task)
47964 return -ESRCH;
47965
47966- mm = mm_access(task, PTRACE_MODE_ATTACH);
47967+ if (gr_acl_handle_procpidmem(task)) {
47968+ put_task_struct(task);
47969+ return -EPERM;
47970+ }
47971+
47972+ mm = mm_access(task, mode);
47973 put_task_struct(task);
47974
47975 if (IS_ERR(mm))
47976@@ -698,11 +757,24 @@ static int mem_open(struct inode* inode, struct file* file)
47977 mmput(mm);
47978 }
47979
47980+ file->private_data = mm;
47981+
47982+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47983+ file->f_version = current->exec_id;
47984+#endif
47985+
47986+ return 0;
47987+}
47988+
47989+static int mem_open(struct inode *inode, struct file *file)
47990+{
47991+ int ret;
47992+ ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
47993+
47994 /* OK to pass negative loff_t, we can catch out-of-range */
47995 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47996- file->private_data = mm;
47997
47998- return 0;
47999+ return ret;
48000 }
48001
48002 static ssize_t mem_rw(struct file *file, char __user *buf,
48003@@ -713,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48004 ssize_t copied;
48005 char *page;
48006
48007+#ifdef CONFIG_GRKERNSEC
48008+ if (write)
48009+ return -EPERM;
48010+#endif
48011+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48012+ if (file->f_version != current->exec_id) {
48013+ gr_log_badprocpid("mem");
48014+ return 0;
48015+ }
48016+#endif
48017+
48018 if (!mm)
48019 return 0;
48020
48021@@ -801,42 +884,49 @@ static const struct file_operations proc_mem_operations = {
48022 .release = mem_release,
48023 };
48024
48025+static int environ_open(struct inode *inode, struct file *file)
48026+{
48027+ return __mem_open(inode, file, PTRACE_MODE_READ);
48028+}
48029+
48030 static ssize_t environ_read(struct file *file, char __user *buf,
48031 size_t count, loff_t *ppos)
48032 {
48033- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
48034 char *page;
48035 unsigned long src = *ppos;
48036- int ret = -ESRCH;
48037- struct mm_struct *mm;
48038+ int ret = 0;
48039+ struct mm_struct *mm = file->private_data;
48040
48041- if (!task)
48042- goto out_no_task;
48043+ if (!mm)
48044+ return 0;
48045+
48046+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48047+ if (file->f_version != current->exec_id) {
48048+ gr_log_badprocpid("environ");
48049+ return 0;
48050+ }
48051+#endif
48052
48053- ret = -ENOMEM;
48054 page = (char *)__get_free_page(GFP_TEMPORARY);
48055 if (!page)
48056- goto out;
48057-
48058-
48059- mm = mm_for_maps(task);
48060- ret = PTR_ERR(mm);
48061- if (!mm || IS_ERR(mm))
48062- goto out_free;
48063+ return -ENOMEM;
48064
48065 ret = 0;
48066+ if (!atomic_inc_not_zero(&mm->mm_users))
48067+ goto free;
48068 while (count > 0) {
48069- int this_len, retval, max_len;
48070+ size_t this_len, max_len;
48071+ int retval;
48072+
48073+ if (src >= (mm->env_end - mm->env_start))
48074+ break;
48075
48076 this_len = mm->env_end - (mm->env_start + src);
48077
48078- if (this_len <= 0)
48079- break;
48080+ max_len = min_t(size_t, PAGE_SIZE, count);
48081+ this_len = min(max_len, this_len);
48082
48083- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
48084- this_len = (this_len > max_len) ? max_len : this_len;
48085-
48086- retval = access_process_vm(task, (mm->env_start + src),
48087+ retval = access_remote_vm(mm, (mm->env_start + src),
48088 page, this_len, 0);
48089
48090 if (retval <= 0) {
48091@@ -855,19 +945,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48092 count -= retval;
48093 }
48094 *ppos = src;
48095-
48096 mmput(mm);
48097-out_free:
48098+
48099+free:
48100 free_page((unsigned long) page);
48101-out:
48102- put_task_struct(task);
48103-out_no_task:
48104 return ret;
48105 }
48106
48107 static const struct file_operations proc_environ_operations = {
48108+ .open = environ_open,
48109 .read = environ_read,
48110 .llseek = generic_file_llseek,
48111+ .release = mem_release,
48112 };
48113
48114 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
48115@@ -1433,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48116 path_put(&nd->path);
48117
48118 /* Are we allowed to snoop on the tasks file descriptors? */
48119- if (!proc_fd_access_allowed(inode))
48120+ if (!proc_fd_access_allowed(inode, 0))
48121 goto out;
48122
48123 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48124@@ -1472,8 +1561,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48125 struct path path;
48126
48127 /* Are we allowed to snoop on the tasks file descriptors? */
48128- if (!proc_fd_access_allowed(inode))
48129- goto out;
48130+ /* logging this is needed for learning on chromium to work properly,
48131+ but we don't want to flood the logs from 'ps' which does a readlink
48132+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48133+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48134+ */
48135+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48136+ if (!proc_fd_access_allowed(inode,0))
48137+ goto out;
48138+ } else {
48139+ if (!proc_fd_access_allowed(inode,1))
48140+ goto out;
48141+ }
48142
48143 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48144 if (error)
48145@@ -1538,7 +1637,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48146 rcu_read_lock();
48147 cred = __task_cred(task);
48148 inode->i_uid = cred->euid;
48149+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48150+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48151+#else
48152 inode->i_gid = cred->egid;
48153+#endif
48154 rcu_read_unlock();
48155 }
48156 security_task_to_inode(task, inode);
48157@@ -1574,10 +1677,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48158 return -ENOENT;
48159 }
48160 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48161+#ifdef CONFIG_GRKERNSEC_PROC_USER
48162+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48163+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48164+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48165+#endif
48166 task_dumpable(task)) {
48167 cred = __task_cred(task);
48168 stat->uid = cred->euid;
48169+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48170+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48171+#else
48172 stat->gid = cred->egid;
48173+#endif
48174 }
48175 }
48176 rcu_read_unlock();
48177@@ -1615,11 +1727,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48178
48179 if (task) {
48180 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48181+#ifdef CONFIG_GRKERNSEC_PROC_USER
48182+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48183+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48184+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48185+#endif
48186 task_dumpable(task)) {
48187 rcu_read_lock();
48188 cred = __task_cred(task);
48189 inode->i_uid = cred->euid;
48190+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48191+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48192+#else
48193 inode->i_gid = cred->egid;
48194+#endif
48195 rcu_read_unlock();
48196 } else {
48197 inode->i_uid = 0;
48198@@ -1737,7 +1858,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48199 int fd = proc_fd(inode);
48200
48201 if (task) {
48202- files = get_files_struct(task);
48203+ if (!gr_acl_handle_procpidmem(task))
48204+ files = get_files_struct(task);
48205 put_task_struct(task);
48206 }
48207 if (files) {
48208@@ -2025,11 +2147,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
48209 if (!task)
48210 goto out_notask;
48211
48212- if (!ptrace_may_access(task, PTRACE_MODE_READ))
48213- goto out;
48214-
48215- mm = get_task_mm(task);
48216- if (!mm)
48217+ mm = mm_access(task, PTRACE_MODE_READ);
48218+ if (IS_ERR_OR_NULL(mm))
48219 goto out;
48220
48221 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
48222@@ -2338,11 +2457,21 @@ static const struct file_operations proc_map_files_operations = {
48223 */
48224 static int proc_fd_permission(struct inode *inode, int mask)
48225 {
48226+ struct task_struct *task;
48227 int rv = generic_permission(inode, mask);
48228- if (rv == 0)
48229- return 0;
48230+
48231 if (task_pid(current) == proc_pid(inode))
48232 rv = 0;
48233+
48234+ task = get_proc_task(inode);
48235+ if (task == NULL)
48236+ return rv;
48237+
48238+ if (gr_acl_handle_procpidmem(task))
48239+ rv = -EACCES;
48240+
48241+ put_task_struct(task);
48242+
48243 return rv;
48244 }
48245
48246@@ -2452,6 +2581,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48247 if (!task)
48248 goto out_no_task;
48249
48250+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48251+ goto out;
48252+
48253 /*
48254 * Yes, it does not scale. And it should not. Don't add
48255 * new entries into /proc/<tgid>/ without very good reasons.
48256@@ -2496,6 +2628,9 @@ static int proc_pident_readdir(struct file *filp,
48257 if (!task)
48258 goto out_no_task;
48259
48260+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48261+ goto out;
48262+
48263 ret = 0;
48264 i = filp->f_pos;
48265 switch (i) {
48266@@ -2766,7 +2901,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48267 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48268 void *cookie)
48269 {
48270- char *s = nd_get_link(nd);
48271+ const char *s = nd_get_link(nd);
48272 if (!IS_ERR(s))
48273 __putname(s);
48274 }
48275@@ -2967,7 +3102,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48276 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48277 #endif
48278 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48279-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48280+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48281 INF("syscall", S_IRUGO, proc_pid_syscall),
48282 #endif
48283 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48284@@ -2992,10 +3127,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48285 #ifdef CONFIG_SECURITY
48286 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48287 #endif
48288-#ifdef CONFIG_KALLSYMS
48289+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48290 INF("wchan", S_IRUGO, proc_pid_wchan),
48291 #endif
48292-#ifdef CONFIG_STACKTRACE
48293+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48294 ONE("stack", S_IRUGO, proc_pid_stack),
48295 #endif
48296 #ifdef CONFIG_SCHEDSTATS
48297@@ -3029,6 +3164,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48298 #ifdef CONFIG_HARDWALL
48299 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48300 #endif
48301+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48302+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48303+#endif
48304 };
48305
48306 static int proc_tgid_base_readdir(struct file * filp,
48307@@ -3155,7 +3293,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48308 if (!inode)
48309 goto out;
48310
48311+#ifdef CONFIG_GRKERNSEC_PROC_USER
48312+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48313+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48314+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48315+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48316+#else
48317 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48318+#endif
48319 inode->i_op = &proc_tgid_base_inode_operations;
48320 inode->i_fop = &proc_tgid_base_operations;
48321 inode->i_flags|=S_IMMUTABLE;
48322@@ -3197,7 +3342,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48323 if (!task)
48324 goto out;
48325
48326+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48327+ goto out_put_task;
48328+
48329 result = proc_pid_instantiate(dir, dentry, task, NULL);
48330+out_put_task:
48331 put_task_struct(task);
48332 out:
48333 return result;
48334@@ -3260,6 +3409,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48335 static int fake_filldir(void *buf, const char *name, int namelen,
48336 loff_t offset, u64 ino, unsigned d_type)
48337 {
48338+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
48339+ __buf->error = -EINVAL;
48340 return 0;
48341 }
48342
48343@@ -3326,7 +3477,7 @@ static const struct pid_entry tid_base_stuff[] = {
48344 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48345 #endif
48346 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48347-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48348+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48349 INF("syscall", S_IRUGO, proc_pid_syscall),
48350 #endif
48351 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48352@@ -3350,10 +3501,10 @@ static const struct pid_entry tid_base_stuff[] = {
48353 #ifdef CONFIG_SECURITY
48354 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48355 #endif
48356-#ifdef CONFIG_KALLSYMS
48357+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48358 INF("wchan", S_IRUGO, proc_pid_wchan),
48359 #endif
48360-#ifdef CONFIG_STACKTRACE
48361+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48362 ONE("stack", S_IRUGO, proc_pid_stack),
48363 #endif
48364 #ifdef CONFIG_SCHEDSTATS
48365diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48366index 82676e3..5f8518a 100644
48367--- a/fs/proc/cmdline.c
48368+++ b/fs/proc/cmdline.c
48369@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48370
48371 static int __init proc_cmdline_init(void)
48372 {
48373+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48374+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48375+#else
48376 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48377+#endif
48378 return 0;
48379 }
48380 module_init(proc_cmdline_init);
48381diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48382index b143471..bb105e5 100644
48383--- a/fs/proc/devices.c
48384+++ b/fs/proc/devices.c
48385@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48386
48387 static int __init proc_devices_init(void)
48388 {
48389+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48390+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48391+#else
48392 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48393+#endif
48394 return 0;
48395 }
48396 module_init(proc_devices_init);
48397diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48398index 205c922..2ee4c57 100644
48399--- a/fs/proc/inode.c
48400+++ b/fs/proc/inode.c
48401@@ -21,11 +21,17 @@
48402 #include <linux/seq_file.h>
48403 #include <linux/slab.h>
48404 #include <linux/mount.h>
48405+#include <linux/grsecurity.h>
48406
48407 #include <asm/uaccess.h>
48408
48409 #include "internal.h"
48410
48411+#ifdef CONFIG_PROC_SYSCTL
48412+extern const struct inode_operations proc_sys_inode_operations;
48413+extern const struct inode_operations proc_sys_dir_operations;
48414+#endif
48415+
48416 static void proc_evict_inode(struct inode *inode)
48417 {
48418 struct proc_dir_entry *de;
48419@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
48420 ns_ops = PROC_I(inode)->ns_ops;
48421 if (ns_ops && ns_ops->put)
48422 ns_ops->put(PROC_I(inode)->ns);
48423+
48424+#ifdef CONFIG_PROC_SYSCTL
48425+ if (inode->i_op == &proc_sys_inode_operations ||
48426+ inode->i_op == &proc_sys_dir_operations)
48427+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48428+#endif
48429+
48430 }
48431
48432 static struct kmem_cache * proc_inode_cachep;
48433@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48434 if (de->mode) {
48435 inode->i_mode = de->mode;
48436 inode->i_uid = de->uid;
48437+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48438+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48439+#else
48440 inode->i_gid = de->gid;
48441+#endif
48442 }
48443 if (de->size)
48444 inode->i_size = de->size;
48445diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48446index 5f79bb8..e9ab85d 100644
48447--- a/fs/proc/internal.h
48448+++ b/fs/proc/internal.h
48449@@ -31,8 +31,6 @@ struct vmalloc_info {
48450 unsigned long largest_chunk;
48451 };
48452
48453-extern struct mm_struct *mm_for_maps(struct task_struct *);
48454-
48455 #ifdef CONFIG_MMU
48456 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
48457 extern void get_vmalloc_info(struct vmalloc_info *vmi);
48458@@ -54,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48459 struct pid *pid, struct task_struct *task);
48460 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48461 struct pid *pid, struct task_struct *task);
48462+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48463+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48464+#endif
48465 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48466
48467 extern const struct file_operations proc_pid_maps_operations;
48468diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48469index 86c67ee..cdca321 100644
48470--- a/fs/proc/kcore.c
48471+++ b/fs/proc/kcore.c
48472@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48473 * the addresses in the elf_phdr on our list.
48474 */
48475 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48476- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48477+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48478+ if (tsz > buflen)
48479 tsz = buflen;
48480-
48481+
48482 while (buflen) {
48483 struct kcore_list *m;
48484
48485@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48486 kfree(elf_buf);
48487 } else {
48488 if (kern_addr_valid(start)) {
48489- unsigned long n;
48490+ char *elf_buf;
48491+ mm_segment_t oldfs;
48492
48493- n = copy_to_user(buffer, (char *)start, tsz);
48494- /*
48495- * We cannot distinguish between fault on source
48496- * and fault on destination. When this happens
48497- * we clear too and hope it will trigger the
48498- * EFAULT again.
48499- */
48500- if (n) {
48501- if (clear_user(buffer + tsz - n,
48502- n))
48503+ elf_buf = kmalloc(tsz, GFP_KERNEL);
48504+ if (!elf_buf)
48505+ return -ENOMEM;
48506+ oldfs = get_fs();
48507+ set_fs(KERNEL_DS);
48508+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48509+ set_fs(oldfs);
48510+ if (copy_to_user(buffer, elf_buf, tsz)) {
48511+ kfree(elf_buf);
48512 return -EFAULT;
48513+ }
48514 }
48515+ set_fs(oldfs);
48516+ kfree(elf_buf);
48517 } else {
48518 if (clear_user(buffer, tsz))
48519 return -EFAULT;
48520@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48521
48522 static int open_kcore(struct inode *inode, struct file *filp)
48523 {
48524+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48525+ return -EPERM;
48526+#endif
48527 if (!capable(CAP_SYS_RAWIO))
48528 return -EPERM;
48529 if (kcore_need_update)
48530diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48531index 80e4645..53e5fcf 100644
48532--- a/fs/proc/meminfo.c
48533+++ b/fs/proc/meminfo.c
48534@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48535 vmi.used >> 10,
48536 vmi.largest_chunk >> 10
48537 #ifdef CONFIG_MEMORY_FAILURE
48538- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48539+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48540 #endif
48541 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48542 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48543diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48544index b1822dd..df622cb 100644
48545--- a/fs/proc/nommu.c
48546+++ b/fs/proc/nommu.c
48547@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48548 if (len < 1)
48549 len = 1;
48550 seq_printf(m, "%*c", len, ' ');
48551- seq_path(m, &file->f_path, "");
48552+ seq_path(m, &file->f_path, "\n\\");
48553 }
48554
48555 seq_putc(m, '\n');
48556diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48557index 06e1cc1..177cd98 100644
48558--- a/fs/proc/proc_net.c
48559+++ b/fs/proc/proc_net.c
48560@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48561 struct task_struct *task;
48562 struct nsproxy *ns;
48563 struct net *net = NULL;
48564+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48565+ const struct cred *cred = current_cred();
48566+#endif
48567+
48568+#ifdef CONFIG_GRKERNSEC_PROC_USER
48569+ if (cred->fsuid)
48570+ return net;
48571+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48572+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48573+ return net;
48574+#endif
48575
48576 rcu_read_lock();
48577 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48578diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48579index 21d836f..bebf3ee 100644
48580--- a/fs/proc/proc_sysctl.c
48581+++ b/fs/proc/proc_sysctl.c
48582@@ -12,11 +12,15 @@
48583 #include <linux/module.h>
48584 #include "internal.h"
48585
48586+extern int gr_handle_chroot_sysctl(const int op);
48587+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48588+ const int op);
48589+
48590 static const struct dentry_operations proc_sys_dentry_operations;
48591 static const struct file_operations proc_sys_file_operations;
48592-static const struct inode_operations proc_sys_inode_operations;
48593+const struct inode_operations proc_sys_inode_operations;
48594 static const struct file_operations proc_sys_dir_file_operations;
48595-static const struct inode_operations proc_sys_dir_operations;
48596+const struct inode_operations proc_sys_dir_operations;
48597
48598 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48599 {
48600@@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48601
48602 err = NULL;
48603 d_set_d_op(dentry, &proc_sys_dentry_operations);
48604+
48605+ gr_handle_proc_create(dentry, inode);
48606+
48607 d_add(dentry, inode);
48608
48609+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
48610+ err = ERR_PTR(-ENOENT);
48611+
48612 out:
48613 sysctl_head_finish(head);
48614 return err;
48615@@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48616 struct inode *inode = filp->f_path.dentry->d_inode;
48617 struct ctl_table_header *head = grab_header(inode);
48618 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48619+ int op = write ? MAY_WRITE : MAY_READ;
48620 ssize_t error;
48621 size_t res;
48622
48623 if (IS_ERR(head))
48624 return PTR_ERR(head);
48625
48626+
48627 /*
48628 * At this point we know that the sysctl was not unregistered
48629 * and won't be until we finish.
48630 */
48631 error = -EPERM;
48632- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48633+ if (sysctl_perm(head->root, table, op))
48634 goto out;
48635
48636 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48637@@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48638 if (!table->proc_handler)
48639 goto out;
48640
48641+#ifdef CONFIG_GRKERNSEC
48642+ error = -EPERM;
48643+ if (gr_handle_chroot_sysctl(op))
48644+ goto out;
48645+ dget(filp->f_path.dentry);
48646+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48647+ dput(filp->f_path.dentry);
48648+ goto out;
48649+ }
48650+ dput(filp->f_path.dentry);
48651+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48652+ goto out;
48653+ if (write && !capable(CAP_SYS_ADMIN))
48654+ goto out;
48655+#endif
48656+
48657 /* careful: calling conventions are nasty here */
48658 res = count;
48659 error = table->proc_handler(table, write, buf, &res, ppos);
48660@@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48661 return -ENOMEM;
48662 } else {
48663 d_set_d_op(child, &proc_sys_dentry_operations);
48664+
48665+ gr_handle_proc_create(child, inode);
48666+
48667 d_add(child, inode);
48668 }
48669 } else {
48670@@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48671 if ((*pos)++ < file->f_pos)
48672 return 0;
48673
48674+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48675+ return 0;
48676+
48677 if (unlikely(S_ISLNK(table->mode)))
48678 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48679 else
48680@@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48681 if (IS_ERR(head))
48682 return PTR_ERR(head);
48683
48684+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48685+ return -ENOENT;
48686+
48687 generic_fillattr(inode, stat);
48688 if (table)
48689 stat->mode = (stat->mode & S_IFMT) | table->mode;
48690@@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48691 .llseek = generic_file_llseek,
48692 };
48693
48694-static const struct inode_operations proc_sys_inode_operations = {
48695+const struct inode_operations proc_sys_inode_operations = {
48696 .permission = proc_sys_permission,
48697 .setattr = proc_sys_setattr,
48698 .getattr = proc_sys_getattr,
48699 };
48700
48701-static const struct inode_operations proc_sys_dir_operations = {
48702+const struct inode_operations proc_sys_dir_operations = {
48703 .lookup = proc_sys_lookup,
48704 .permission = proc_sys_permission,
48705 .setattr = proc_sys_setattr,
48706diff --git a/fs/proc/root.c b/fs/proc/root.c
48707index eed44bf..abeb499 100644
48708--- a/fs/proc/root.c
48709+++ b/fs/proc/root.c
48710@@ -188,7 +188,15 @@ void __init proc_root_init(void)
48711 #ifdef CONFIG_PROC_DEVICETREE
48712 proc_device_tree_init();
48713 #endif
48714+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48715+#ifdef CONFIG_GRKERNSEC_PROC_USER
48716+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48717+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48718+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48719+#endif
48720+#else
48721 proc_mkdir("bus", NULL);
48722+#endif
48723 proc_sys_init();
48724 }
48725
48726diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48727index 7faaf2a..7793015 100644
48728--- a/fs/proc/task_mmu.c
48729+++ b/fs/proc/task_mmu.c
48730@@ -11,12 +11,19 @@
48731 #include <linux/rmap.h>
48732 #include <linux/swap.h>
48733 #include <linux/swapops.h>
48734+#include <linux/grsecurity.h>
48735
48736 #include <asm/elf.h>
48737 #include <asm/uaccess.h>
48738 #include <asm/tlbflush.h>
48739 #include "internal.h"
48740
48741+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48742+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48743+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48744+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48745+#endif
48746+
48747 void task_mem(struct seq_file *m, struct mm_struct *mm)
48748 {
48749 unsigned long data, text, lib, swap;
48750@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48751 "VmExe:\t%8lu kB\n"
48752 "VmLib:\t%8lu kB\n"
48753 "VmPTE:\t%8lu kB\n"
48754- "VmSwap:\t%8lu kB\n",
48755- hiwater_vm << (PAGE_SHIFT-10),
48756+ "VmSwap:\t%8lu kB\n"
48757+
48758+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48759+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48760+#endif
48761+
48762+ ,hiwater_vm << (PAGE_SHIFT-10),
48763 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48764 mm->locked_vm << (PAGE_SHIFT-10),
48765 mm->pinned_vm << (PAGE_SHIFT-10),
48766@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48767 data << (PAGE_SHIFT-10),
48768 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48769 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48770- swap << (PAGE_SHIFT-10));
48771+ swap << (PAGE_SHIFT-10)
48772+
48773+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48774+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48775+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48776+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48777+#else
48778+ , mm->context.user_cs_base
48779+ , mm->context.user_cs_limit
48780+#endif
48781+#endif
48782+
48783+ );
48784 }
48785
48786 unsigned long task_vsize(struct mm_struct *mm)
48787@@ -125,7 +149,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48788 if (!priv->task)
48789 return ERR_PTR(-ESRCH);
48790
48791- mm = mm_for_maps(priv->task);
48792+ mm = mm_access(priv->task, PTRACE_MODE_READ);
48793 if (!mm || IS_ERR(mm))
48794 return mm;
48795 down_read(&mm->mmap_sem);
48796@@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48797 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48798 }
48799
48800- /* We don't show the stack guard page in /proc/maps */
48801+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48802+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48803+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48804+#else
48805 start = vma->vm_start;
48806- if (stack_guard_page_start(vma, start))
48807- start += PAGE_SIZE;
48808 end = vma->vm_end;
48809- if (stack_guard_page_end(vma, end))
48810- end -= PAGE_SIZE;
48811+#endif
48812
48813 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48814 start,
48815@@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48816 flags & VM_WRITE ? 'w' : '-',
48817 flags & VM_EXEC ? 'x' : '-',
48818 flags & VM_MAYSHARE ? 's' : 'p',
48819+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48820+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48821+#else
48822 pgoff,
48823+#endif
48824 MAJOR(dev), MINOR(dev), ino, &len);
48825
48826 /*
48827@@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48828 */
48829 if (file) {
48830 pad_len_spaces(m, len);
48831- seq_path(m, &file->f_path, "\n");
48832+ seq_path(m, &file->f_path, "\n\\");
48833 goto done;
48834 }
48835
48836@@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48837 * Thread stack in /proc/PID/task/TID/maps or
48838 * the main process stack.
48839 */
48840- if (!is_pid || (vma->vm_start <= mm->start_stack &&
48841- vma->vm_end >= mm->start_stack)) {
48842+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48843+ (vma->vm_start <= mm->start_stack &&
48844+ vma->vm_end >= mm->start_stack)) {
48845 name = "[stack]";
48846 } else {
48847 /* Thread stack in /proc/PID/maps */
48848@@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48849 struct proc_maps_private *priv = m->private;
48850 struct task_struct *task = priv->task;
48851
48852+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48853+ if (current->exec_id != m->exec_id) {
48854+ gr_log_badprocpid("maps");
48855+ return 0;
48856+ }
48857+#endif
48858+
48859 show_map_vma(m, vma, is_pid);
48860
48861 if (m->count < m->size) /* vma is copied successfully */
48862@@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48863 .private = &mss,
48864 };
48865
48866+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48867+ if (current->exec_id != m->exec_id) {
48868+ gr_log_badprocpid("smaps");
48869+ return 0;
48870+ }
48871+#endif
48872 memset(&mss, 0, sizeof mss);
48873- mss.vma = vma;
48874- /* mmap_sem is held in m_start */
48875- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48876- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48877-
48878+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48879+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48880+#endif
48881+ mss.vma = vma;
48882+ /* mmap_sem is held in m_start */
48883+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48884+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48885+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48886+ }
48887+#endif
48888 show_map_vma(m, vma, is_pid);
48889
48890 seq_printf(m,
48891@@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48892 "KernelPageSize: %8lu kB\n"
48893 "MMUPageSize: %8lu kB\n"
48894 "Locked: %8lu kB\n",
48895+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48896+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48897+#else
48898 (vma->vm_end - vma->vm_start) >> 10,
48899+#endif
48900 mss.resident >> 10,
48901 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48902 mss.shared_clean >> 10,
48903@@ -919,7 +970,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
48904 if (!pm.buffer)
48905 goto out_task;
48906
48907- mm = mm_for_maps(task);
48908+ mm = mm_access(task, PTRACE_MODE_READ);
48909 ret = PTR_ERR(mm);
48910 if (!mm || IS_ERR(mm))
48911 goto out_free;
48912@@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48913 int n;
48914 char buffer[50];
48915
48916+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48917+ if (current->exec_id != m->exec_id) {
48918+ gr_log_badprocpid("numa_maps");
48919+ return 0;
48920+ }
48921+#endif
48922+
48923 if (!mm)
48924 return 0;
48925
48926@@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48927 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48928 mpol_cond_put(pol);
48929
48930+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48931+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48932+#else
48933 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48934+#endif
48935
48936 if (file) {
48937 seq_printf(m, " file=");
48938- seq_path(m, &file->f_path, "\n\t= ");
48939+ seq_path(m, &file->f_path, "\n\t\\= ");
48940 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48941 seq_printf(m, " heap");
48942 } else {
48943diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48944index 74fe164..0848f95 100644
48945--- a/fs/proc/task_nommu.c
48946+++ b/fs/proc/task_nommu.c
48947@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48948 else
48949 bytes += kobjsize(mm);
48950
48951- if (current->fs && current->fs->users > 1)
48952+ if (current->fs && atomic_read(&current->fs->users) > 1)
48953 sbytes += kobjsize(current->fs);
48954 else
48955 bytes += kobjsize(current->fs);
48956@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48957
48958 if (file) {
48959 pad_len_spaces(m, len);
48960- seq_path(m, &file->f_path, "");
48961+ seq_path(m, &file->f_path, "\n\\");
48962 } else if (mm) {
48963 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48964
48965@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48966 if (!priv->task)
48967 return ERR_PTR(-ESRCH);
48968
48969- mm = mm_for_maps(priv->task);
48970+ mm = mm_access(priv->task, PTRACE_MODE_READ);
48971 if (!mm || IS_ERR(mm)) {
48972 put_task_struct(priv->task);
48973 priv->task = NULL;
48974diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48975index d67908b..d13f6a6 100644
48976--- a/fs/quota/netlink.c
48977+++ b/fs/quota/netlink.c
48978@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48979 void quota_send_warning(short type, unsigned int id, dev_t dev,
48980 const char warntype)
48981 {
48982- static atomic_t seq;
48983+ static atomic_unchecked_t seq;
48984 struct sk_buff *skb;
48985 void *msg_head;
48986 int ret;
48987@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48988 "VFS: Not enough memory to send quota warning.\n");
48989 return;
48990 }
48991- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48992+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48993 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48994 if (!msg_head) {
48995 printk(KERN_ERR
48996diff --git a/fs/readdir.c b/fs/readdir.c
48997index cc0a822..43cb195 100644
48998--- a/fs/readdir.c
48999+++ b/fs/readdir.c
49000@@ -17,6 +17,7 @@
49001 #include <linux/security.h>
49002 #include <linux/syscalls.h>
49003 #include <linux/unistd.h>
49004+#include <linux/namei.h>
49005
49006 #include <asm/uaccess.h>
49007
49008@@ -67,6 +68,7 @@ struct old_linux_dirent {
49009
49010 struct readdir_callback {
49011 struct old_linux_dirent __user * dirent;
49012+ struct file * file;
49013 int result;
49014 };
49015
49016@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49017 buf->result = -EOVERFLOW;
49018 return -EOVERFLOW;
49019 }
49020+
49021+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49022+ return 0;
49023+
49024 buf->result++;
49025 dirent = buf->dirent;
49026 if (!access_ok(VERIFY_WRITE, dirent,
49027@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49028
49029 buf.result = 0;
49030 buf.dirent = dirent;
49031+ buf.file = file;
49032
49033 error = vfs_readdir(file, fillonedir, &buf);
49034 if (buf.result)
49035@@ -142,6 +149,7 @@ struct linux_dirent {
49036 struct getdents_callback {
49037 struct linux_dirent __user * current_dir;
49038 struct linux_dirent __user * previous;
49039+ struct file * file;
49040 int count;
49041 int error;
49042 };
49043@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49044 buf->error = -EOVERFLOW;
49045 return -EOVERFLOW;
49046 }
49047+
49048+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49049+ return 0;
49050+
49051 dirent = buf->previous;
49052 if (dirent) {
49053 if (__put_user(offset, &dirent->d_off))
49054@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49055 buf.previous = NULL;
49056 buf.count = count;
49057 buf.error = 0;
49058+ buf.file = file;
49059
49060 error = vfs_readdir(file, filldir, &buf);
49061 if (error >= 0)
49062@@ -229,6 +242,7 @@ out:
49063 struct getdents_callback64 {
49064 struct linux_dirent64 __user * current_dir;
49065 struct linux_dirent64 __user * previous;
49066+ struct file *file;
49067 int count;
49068 int error;
49069 };
49070@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49071 buf->error = -EINVAL; /* only used if we fail.. */
49072 if (reclen > buf->count)
49073 return -EINVAL;
49074+
49075+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49076+ return 0;
49077+
49078 dirent = buf->previous;
49079 if (dirent) {
49080 if (__put_user(offset, &dirent->d_off))
49081@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49082
49083 buf.current_dir = dirent;
49084 buf.previous = NULL;
49085+ buf.file = file;
49086 buf.count = count;
49087 buf.error = 0;
49088
49089@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49090 error = buf.error;
49091 lastdirent = buf.previous;
49092 if (lastdirent) {
49093- typeof(lastdirent->d_off) d_off = file->f_pos;
49094+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49095 if (__put_user(d_off, &lastdirent->d_off))
49096 error = -EFAULT;
49097 else
49098diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49099index 2b7882b..1c5ef48 100644
49100--- a/fs/reiserfs/do_balan.c
49101+++ b/fs/reiserfs/do_balan.c
49102@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49103 return;
49104 }
49105
49106- atomic_inc(&(fs_generation(tb->tb_sb)));
49107+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49108 do_balance_starts(tb);
49109
49110 /* balance leaf returns 0 except if combining L R and S into
49111diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49112index 2c1ade6..8c59d8d 100644
49113--- a/fs/reiserfs/procfs.c
49114+++ b/fs/reiserfs/procfs.c
49115@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49116 "SMALL_TAILS " : "NO_TAILS ",
49117 replay_only(sb) ? "REPLAY_ONLY " : "",
49118 convert_reiserfs(sb) ? "CONV " : "",
49119- atomic_read(&r->s_generation_counter),
49120+ atomic_read_unchecked(&r->s_generation_counter),
49121 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49122 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49123 SF(s_good_search_by_key_reada), SF(s_bmaps),
49124diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
49125index a59d271..e12d1cf 100644
49126--- a/fs/reiserfs/reiserfs.h
49127+++ b/fs/reiserfs/reiserfs.h
49128@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
49129 /* Comment? -Hans */
49130 wait_queue_head_t s_wait;
49131 /* To be obsoleted soon by per buffer seals.. -Hans */
49132- atomic_t s_generation_counter; // increased by one every time the
49133+ atomic_unchecked_t s_generation_counter; // increased by one every time the
49134 // tree gets re-balanced
49135 unsigned long s_properties; /* File system properties. Currently holds
49136 on-disk FS format */
49137@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
49138 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
49139
49140 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
49141-#define get_generation(s) atomic_read (&fs_generation(s))
49142+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
49143 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
49144 #define __fs_changed(gen,s) (gen != get_generation (s))
49145 #define fs_changed(gen,s) \
49146diff --git a/fs/select.c b/fs/select.c
49147index 17d33d0..da0bf5c 100644
49148--- a/fs/select.c
49149+++ b/fs/select.c
49150@@ -20,6 +20,7 @@
49151 #include <linux/export.h>
49152 #include <linux/slab.h>
49153 #include <linux/poll.h>
49154+#include <linux/security.h>
49155 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49156 #include <linux/file.h>
49157 #include <linux/fdtable.h>
49158@@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49159 struct poll_list *walk = head;
49160 unsigned long todo = nfds;
49161
49162+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49163 if (nfds > rlimit(RLIMIT_NOFILE))
49164 return -EINVAL;
49165
49166diff --git a/fs/seq_file.c b/fs/seq_file.c
49167index 0cbd049..64e705c 100644
49168--- a/fs/seq_file.c
49169+++ b/fs/seq_file.c
49170@@ -9,6 +9,7 @@
49171 #include <linux/export.h>
49172 #include <linux/seq_file.h>
49173 #include <linux/slab.h>
49174+#include <linux/sched.h>
49175
49176 #include <asm/uaccess.h>
49177 #include <asm/page.h>
49178@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49179 memset(p, 0, sizeof(*p));
49180 mutex_init(&p->lock);
49181 p->op = op;
49182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49183+ p->exec_id = current->exec_id;
49184+#endif
49185
49186 /*
49187 * Wrappers around seq_open(e.g. swaps_open) need to be
49188@@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49189 return 0;
49190 }
49191 if (!m->buf) {
49192- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49193+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49194 if (!m->buf)
49195 return -ENOMEM;
49196 }
49197@@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49198 Eoverflow:
49199 m->op->stop(m, p);
49200 kfree(m->buf);
49201- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49202+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49203 return !m->buf ? -ENOMEM : -EAGAIN;
49204 }
49205
49206@@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49207
49208 /* grab buffer if we didn't have one */
49209 if (!m->buf) {
49210- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49211+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49212 if (!m->buf)
49213 goto Enomem;
49214 }
49215@@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49216 goto Fill;
49217 m->op->stop(m, p);
49218 kfree(m->buf);
49219- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49220+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49221 if (!m->buf)
49222 goto Enomem;
49223 m->count = 0;
49224@@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
49225 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49226 void *data)
49227 {
49228- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49229+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49230 int res = -ENOMEM;
49231
49232 if (op) {
49233diff --git a/fs/splice.c b/fs/splice.c
49234index 5cac690..f833a99 100644
49235--- a/fs/splice.c
49236+++ b/fs/splice.c
49237@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49238 pipe_lock(pipe);
49239
49240 for (;;) {
49241- if (!pipe->readers) {
49242+ if (!atomic_read(&pipe->readers)) {
49243 send_sig(SIGPIPE, current, 0);
49244 if (!ret)
49245 ret = -EPIPE;
49246@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49247 do_wakeup = 0;
49248 }
49249
49250- pipe->waiting_writers++;
49251+ atomic_inc(&pipe->waiting_writers);
49252 pipe_wait(pipe);
49253- pipe->waiting_writers--;
49254+ atomic_dec(&pipe->waiting_writers);
49255 }
49256
49257 pipe_unlock(pipe);
49258@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49259 old_fs = get_fs();
49260 set_fs(get_ds());
49261 /* The cast to a user pointer is valid due to the set_fs() */
49262- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49263+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49264 set_fs(old_fs);
49265
49266 return res;
49267@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49268 old_fs = get_fs();
49269 set_fs(get_ds());
49270 /* The cast to a user pointer is valid due to the set_fs() */
49271- res = vfs_write(file, (const char __user *)buf, count, &pos);
49272+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49273 set_fs(old_fs);
49274
49275 return res;
49276@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49277 goto err;
49278
49279 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49280- vec[i].iov_base = (void __user *) page_address(page);
49281+ vec[i].iov_base = (void __force_user *) page_address(page);
49282 vec[i].iov_len = this_len;
49283 spd.pages[i] = page;
49284 spd.nr_pages++;
49285@@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49286 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49287 {
49288 while (!pipe->nrbufs) {
49289- if (!pipe->writers)
49290+ if (!atomic_read(&pipe->writers))
49291 return 0;
49292
49293- if (!pipe->waiting_writers && sd->num_spliced)
49294+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49295 return 0;
49296
49297 if (sd->flags & SPLICE_F_NONBLOCK)
49298@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49299 * out of the pipe right after the splice_to_pipe(). So set
49300 * PIPE_READERS appropriately.
49301 */
49302- pipe->readers = 1;
49303+ atomic_set(&pipe->readers, 1);
49304
49305 current->splice_pipe = pipe;
49306 }
49307@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49308 ret = -ERESTARTSYS;
49309 break;
49310 }
49311- if (!pipe->writers)
49312+ if (!atomic_read(&pipe->writers))
49313 break;
49314- if (!pipe->waiting_writers) {
49315+ if (!atomic_read(&pipe->waiting_writers)) {
49316 if (flags & SPLICE_F_NONBLOCK) {
49317 ret = -EAGAIN;
49318 break;
49319@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49320 pipe_lock(pipe);
49321
49322 while (pipe->nrbufs >= pipe->buffers) {
49323- if (!pipe->readers) {
49324+ if (!atomic_read(&pipe->readers)) {
49325 send_sig(SIGPIPE, current, 0);
49326 ret = -EPIPE;
49327 break;
49328@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49329 ret = -ERESTARTSYS;
49330 break;
49331 }
49332- pipe->waiting_writers++;
49333+ atomic_inc(&pipe->waiting_writers);
49334 pipe_wait(pipe);
49335- pipe->waiting_writers--;
49336+ atomic_dec(&pipe->waiting_writers);
49337 }
49338
49339 pipe_unlock(pipe);
49340@@ -1823,14 +1823,14 @@ retry:
49341 pipe_double_lock(ipipe, opipe);
49342
49343 do {
49344- if (!opipe->readers) {
49345+ if (!atomic_read(&opipe->readers)) {
49346 send_sig(SIGPIPE, current, 0);
49347 if (!ret)
49348 ret = -EPIPE;
49349 break;
49350 }
49351
49352- if (!ipipe->nrbufs && !ipipe->writers)
49353+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49354 break;
49355
49356 /*
49357@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49358 pipe_double_lock(ipipe, opipe);
49359
49360 do {
49361- if (!opipe->readers) {
49362+ if (!atomic_read(&opipe->readers)) {
49363 send_sig(SIGPIPE, current, 0);
49364 if (!ret)
49365 ret = -EPIPE;
49366@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49367 * return EAGAIN if we have the potential of some data in the
49368 * future, otherwise just return 0
49369 */
49370- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49371+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49372 ret = -EAGAIN;
49373
49374 pipe_unlock(ipipe);
49375diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49376index 35a36d3..23424b2 100644
49377--- a/fs/sysfs/dir.c
49378+++ b/fs/sysfs/dir.c
49379@@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49380 struct sysfs_dirent *sd;
49381 int rc;
49382
49383+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49384+ const char *parent_name = parent_sd->s_name;
49385+
49386+ mode = S_IFDIR | S_IRWXU;
49387+
49388+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49389+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49390+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49391+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49392+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49393+#endif
49394+
49395 /* allocate */
49396 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49397 if (!sd)
49398diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49399index 00012e3..8392349 100644
49400--- a/fs/sysfs/file.c
49401+++ b/fs/sysfs/file.c
49402@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49403
49404 struct sysfs_open_dirent {
49405 atomic_t refcnt;
49406- atomic_t event;
49407+ atomic_unchecked_t event;
49408 wait_queue_head_t poll;
49409 struct list_head buffers; /* goes through sysfs_buffer.list */
49410 };
49411@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49412 if (!sysfs_get_active(attr_sd))
49413 return -ENODEV;
49414
49415- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49416+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49417 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49418
49419 sysfs_put_active(attr_sd);
49420@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49421 return -ENOMEM;
49422
49423 atomic_set(&new_od->refcnt, 0);
49424- atomic_set(&new_od->event, 1);
49425+ atomic_set_unchecked(&new_od->event, 1);
49426 init_waitqueue_head(&new_od->poll);
49427 INIT_LIST_HEAD(&new_od->buffers);
49428 goto retry;
49429@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49430
49431 sysfs_put_active(attr_sd);
49432
49433- if (buffer->event != atomic_read(&od->event))
49434+ if (buffer->event != atomic_read_unchecked(&od->event))
49435 goto trigger;
49436
49437 return DEFAULT_POLLMASK;
49438@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49439
49440 od = sd->s_attr.open;
49441 if (od) {
49442- atomic_inc(&od->event);
49443+ atomic_inc_unchecked(&od->event);
49444 wake_up_interruptible(&od->poll);
49445 }
49446
49447diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49448index a7ac78f..02158e1 100644
49449--- a/fs/sysfs/symlink.c
49450+++ b/fs/sysfs/symlink.c
49451@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49452
49453 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49454 {
49455- char *page = nd_get_link(nd);
49456+ const char *page = nd_get_link(nd);
49457 if (!IS_ERR(page))
49458 free_page((unsigned long)page);
49459 }
49460diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49461index c175b4d..8f36a16 100644
49462--- a/fs/udf/misc.c
49463+++ b/fs/udf/misc.c
49464@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49465
49466 u8 udf_tag_checksum(const struct tag *t)
49467 {
49468- u8 *data = (u8 *)t;
49469+ const u8 *data = (const u8 *)t;
49470 u8 checksum = 0;
49471 int i;
49472 for (i = 0; i < sizeof(struct tag); ++i)
49473diff --git a/fs/utimes.c b/fs/utimes.c
49474index ba653f3..06ea4b1 100644
49475--- a/fs/utimes.c
49476+++ b/fs/utimes.c
49477@@ -1,6 +1,7 @@
49478 #include <linux/compiler.h>
49479 #include <linux/file.h>
49480 #include <linux/fs.h>
49481+#include <linux/security.h>
49482 #include <linux/linkage.h>
49483 #include <linux/mount.h>
49484 #include <linux/namei.h>
49485@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49486 goto mnt_drop_write_and_out;
49487 }
49488 }
49489+
49490+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49491+ error = -EACCES;
49492+ goto mnt_drop_write_and_out;
49493+ }
49494+
49495 mutex_lock(&inode->i_mutex);
49496 error = notify_change(path->dentry, &newattrs);
49497 mutex_unlock(&inode->i_mutex);
49498diff --git a/fs/xattr.c b/fs/xattr.c
49499index 3c8c1cc..a83c398 100644
49500--- a/fs/xattr.c
49501+++ b/fs/xattr.c
49502@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49503 * Extended attribute SET operations
49504 */
49505 static long
49506-setxattr(struct dentry *d, const char __user *name, const void __user *value,
49507+setxattr(struct path *path, const char __user *name, const void __user *value,
49508 size_t size, int flags)
49509 {
49510 int error;
49511@@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49512 }
49513 }
49514
49515- error = vfs_setxattr(d, kname, kvalue, size, flags);
49516+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49517+ error = -EACCES;
49518+ goto out;
49519+ }
49520+
49521+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49522 out:
49523 if (vvalue)
49524 vfree(vvalue);
49525@@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49526 return error;
49527 error = mnt_want_write(path.mnt);
49528 if (!error) {
49529- error = setxattr(path.dentry, name, value, size, flags);
49530+ error = setxattr(&path, name, value, size, flags);
49531 mnt_drop_write(path.mnt);
49532 }
49533 path_put(&path);
49534@@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49535 return error;
49536 error = mnt_want_write(path.mnt);
49537 if (!error) {
49538- error = setxattr(path.dentry, name, value, size, flags);
49539+ error = setxattr(&path, name, value, size, flags);
49540 mnt_drop_write(path.mnt);
49541 }
49542 path_put(&path);
49543@@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49544 const void __user *,value, size_t, size, int, flags)
49545 {
49546 struct file *f;
49547- struct dentry *dentry;
49548 int error = -EBADF;
49549
49550 f = fget(fd);
49551 if (!f)
49552 return error;
49553- dentry = f->f_path.dentry;
49554- audit_inode(NULL, dentry);
49555+ audit_inode(NULL, f->f_path.dentry);
49556 error = mnt_want_write_file(f);
49557 if (!error) {
49558- error = setxattr(dentry, name, value, size, flags);
49559+ error = setxattr(&f->f_path, name, value, size, flags);
49560 mnt_drop_write_file(f);
49561 }
49562 fput(f);
49563diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49564index 69d06b0..c0996e5 100644
49565--- a/fs/xattr_acl.c
49566+++ b/fs/xattr_acl.c
49567@@ -17,8 +17,8 @@
49568 struct posix_acl *
49569 posix_acl_from_xattr(const void *value, size_t size)
49570 {
49571- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49572- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49573+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49574+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49575 int count;
49576 struct posix_acl *acl;
49577 struct posix_acl_entry *acl_e;
49578diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49579index 85e7e32..5344e52 100644
49580--- a/fs/xfs/xfs_bmap.c
49581+++ b/fs/xfs/xfs_bmap.c
49582@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49583 int nmap,
49584 int ret_nmap);
49585 #else
49586-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49587+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49588 #endif /* DEBUG */
49589
49590 STATIC int
49591diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49592index 79d05e8..e3e5861 100644
49593--- a/fs/xfs/xfs_dir2_sf.c
49594+++ b/fs/xfs/xfs_dir2_sf.c
49595@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49596 }
49597
49598 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49599- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49600+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49601+ char name[sfep->namelen];
49602+ memcpy(name, sfep->name, sfep->namelen);
49603+ if (filldir(dirent, name, sfep->namelen,
49604+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
49605+ *offset = off & 0x7fffffff;
49606+ return 0;
49607+ }
49608+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49609 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49610 *offset = off & 0x7fffffff;
49611 return 0;
49612diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49613index 91f8ff5..0ce68f9 100644
49614--- a/fs/xfs/xfs_ioctl.c
49615+++ b/fs/xfs/xfs_ioctl.c
49616@@ -128,7 +128,7 @@ xfs_find_handle(
49617 }
49618
49619 error = -EFAULT;
49620- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49621+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49622 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49623 goto out_put;
49624
49625diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49626index 3011b87..1ab03e9 100644
49627--- a/fs/xfs/xfs_iops.c
49628+++ b/fs/xfs/xfs_iops.c
49629@@ -397,7 +397,7 @@ xfs_vn_put_link(
49630 struct nameidata *nd,
49631 void *p)
49632 {
49633- char *s = nd_get_link(nd);
49634+ const char *s = nd_get_link(nd);
49635
49636 if (!IS_ERR(s))
49637 kfree(s);
49638diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49639new file mode 100644
49640index 0000000..4d533f1
49641--- /dev/null
49642+++ b/grsecurity/Kconfig
49643@@ -0,0 +1,941 @@
49644+#
49645+# grsecurity configuration
49646+#
49647+menu "Memory Protections"
49648+depends on GRKERNSEC
49649+
49650+config GRKERNSEC_KMEM
49651+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49652+ default y if GRKERNSEC_CONFIG_AUTO
49653+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49654+ help
49655+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49656+ be written to or read from to modify or leak the contents of the running
49657+ kernel. /dev/port will also not be allowed to be opened. If you have module
49658+ support disabled, enabling this will close up four ways that are
49659+ currently used to insert malicious code into the running kernel.
49660+ Even with all these features enabled, we still highly recommend that
49661+ you use the RBAC system, as it is still possible for an attacker to
49662+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49663+ If you are not using XFree86, you may be able to stop this additional
49664+ case by enabling the 'Disable privileged I/O' option. Though nothing
49665+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49666+ but only to video memory, which is the only writing we allow in this
49667+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49668+ not be allowed to mprotect it with PROT_WRITE later.
49669+ It is highly recommended that you say Y here if you meet all the
49670+ conditions above.
49671+
49672+config GRKERNSEC_VM86
49673+ bool "Restrict VM86 mode"
49674+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49675+ depends on X86_32
49676+
49677+ help
49678+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49679+ make use of a special execution mode on 32bit x86 processors called
49680+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49681+ video cards and will still work with this option enabled. The purpose
49682+ of the option is to prevent exploitation of emulation errors in
49683+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49684+ Nearly all users should be able to enable this option.
49685+
49686+config GRKERNSEC_IO
49687+ bool "Disable privileged I/O"
49688+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49689+ depends on X86
49690+ select RTC_CLASS
49691+ select RTC_INTF_DEV
49692+ select RTC_DRV_CMOS
49693+
49694+ help
49695+ If you say Y here, all ioperm and iopl calls will return an error.
49696+ Ioperm and iopl can be used to modify the running kernel.
49697+ Unfortunately, some programs need this access to operate properly,
49698+ the most notable of which are XFree86 and hwclock. hwclock can be
49699+ remedied by having RTC support in the kernel, so real-time
49700+ clock support is enabled if this option is enabled, to ensure
49701+ that hwclock operates correctly. XFree86 still will not
49702+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49703+ IF YOU USE XFree86. If you use XFree86 and you still want to
49704+ protect your kernel against modification, use the RBAC system.
49705+
49706+config GRKERNSEC_PROC_MEMMAP
49707+ bool "Harden ASLR against information leaks and entropy reduction"
49708+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
49709+ depends on PAX_NOEXEC || PAX_ASLR
49710+ help
49711+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49712+ give no information about the addresses of its mappings if
49713+ PaX features that rely on random addresses are enabled on the task.
49714+ In addition to sanitizing this information and disabling other
49715+ dangerous sources of information, this option causes reads of sensitive
49716+ /proc/<pid> entries where the file descriptor was opened in a different
49717+ task than the one performing the read. Such attempts are logged.
49718+ This option also limits argv/env strings for suid/sgid binaries
49719+ to 512KB to prevent a complete exhaustion of the stack entropy provided
49720+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49721+ binaries to prevent alternative mmap layouts from being abused.
49722+
49723+ If you use PaX it is essential that you say Y here as it closes up
49724+ several holes that make full ASLR useless locally.
49725+
49726+config GRKERNSEC_BRUTE
49727+ bool "Deter exploit bruteforcing"
49728+ default y if GRKERNSEC_CONFIG_AUTO
49729+ help
49730+ If you say Y here, attempts to bruteforce exploits against forking
49731+ daemons such as apache or sshd, as well as against suid/sgid binaries
49732+ will be deterred. When a child of a forking daemon is killed by PaX
49733+ or crashes due to an illegal instruction or other suspicious signal,
49734+ the parent process will be delayed 30 seconds upon every subsequent
49735+ fork until the administrator is able to assess the situation and
49736+ restart the daemon.
49737+ In the suid/sgid case, the attempt is logged, the user has all their
49738+ processes terminated, and they are prevented from executing any further
49739+ processes for 15 minutes.
49740+ It is recommended that you also enable signal logging in the auditing
49741+ section so that logs are generated when a process triggers a suspicious
49742+ signal.
49743+ If the sysctl option is enabled, a sysctl option with name
49744+ "deter_bruteforce" is created.
49745+
49746+
49747+config GRKERNSEC_MODHARDEN
49748+ bool "Harden module auto-loading"
49749+ default y if GRKERNSEC_CONFIG_AUTO
49750+ depends on MODULES
49751+ help
49752+ If you say Y here, module auto-loading in response to use of some
49753+ feature implemented by an unloaded module will be restricted to
49754+ root users. Enabling this option helps defend against attacks
49755+ by unprivileged users who abuse the auto-loading behavior to
49756+ cause a vulnerable module to load that is then exploited.
49757+
49758+ If this option prevents a legitimate use of auto-loading for a
49759+ non-root user, the administrator can execute modprobe manually
49760+ with the exact name of the module mentioned in the alert log.
49761+ Alternatively, the administrator can add the module to the list
49762+ of modules loaded at boot by modifying init scripts.
49763+
49764+ Modification of init scripts will most likely be needed on
49765+ Ubuntu servers with encrypted home directory support enabled,
49766+ as the first non-root user logging in will cause the ecb(aes),
49767+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49768+
49769+config GRKERNSEC_HIDESYM
49770+ bool "Hide kernel symbols"
49771+ default y if GRKERNSEC_CONFIG_AUTO
49772+ select PAX_USERCOPY_SLABS
49773+ help
49774+ If you say Y here, getting information on loaded modules, and
49775+ displaying all kernel symbols through a syscall will be restricted
49776+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49777+ /proc/kallsyms will be restricted to the root user. The RBAC
49778+ system can hide that entry even from root.
49779+
49780+ This option also prevents leaking of kernel addresses through
49781+ several /proc entries.
49782+
49783+ Note that this option is only effective provided the following
49784+ conditions are met:
49785+ 1) The kernel using grsecurity is not precompiled by some distribution
49786+ 2) You have also enabled GRKERNSEC_DMESG
49787+ 3) You are using the RBAC system and hiding other files such as your
49788+ kernel image and System.map. Alternatively, enabling this option
49789+ causes the permissions on /boot, /lib/modules, and the kernel
49790+ source directory to change at compile time to prevent
49791+ reading by non-root users.
49792+ If the above conditions are met, this option will aid in providing a
49793+ useful protection against local kernel exploitation of overflows
49794+ and arbitrary read/write vulnerabilities.
49795+
49796+config GRKERNSEC_KERN_LOCKOUT
49797+ bool "Active kernel exploit response"
49798+ default y if GRKERNSEC_CONFIG_AUTO
49799+ depends on X86 || ARM || PPC || SPARC
49800+ help
49801+ If you say Y here, when a PaX alert is triggered due to suspicious
49802+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49803+ or an OOPS occurs due to bad memory accesses, instead of just
49804+ terminating the offending process (and potentially allowing
49805+ a subsequent exploit from the same user), we will take one of two
49806+ actions:
49807+ If the user was root, we will panic the system
49808+ If the user was non-root, we will log the attempt, terminate
49809+ all processes owned by the user, then prevent them from creating
49810+ any new processes until the system is restarted
49811+ This deters repeated kernel exploitation/bruteforcing attempts
49812+ and is useful for later forensics.
49813+
49814+endmenu
49815+menu "Role Based Access Control Options"
49816+depends on GRKERNSEC
49817+
49818+config GRKERNSEC_RBAC_DEBUG
49819+ bool
49820+
49821+config GRKERNSEC_NO_RBAC
49822+ bool "Disable RBAC system"
49823+ help
49824+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49825+ preventing the RBAC system from being enabled. You should only say Y
49826+ here if you have no intention of using the RBAC system, so as to prevent
49827+ an attacker with root access from misusing the RBAC system to hide files
49828+ and processes when loadable module support and /dev/[k]mem have been
49829+ locked down.
49830+
49831+config GRKERNSEC_ACL_HIDEKERN
49832+ bool "Hide kernel processes"
49833+ help
49834+ If you say Y here, all kernel threads will be hidden to all
49835+ processes but those whose subject has the "view hidden processes"
49836+ flag.
49837+
49838+config GRKERNSEC_ACL_MAXTRIES
49839+ int "Maximum tries before password lockout"
49840+ default 3
49841+ help
49842+ This option enforces the maximum number of times a user can attempt
49843+ to authorize themselves with the grsecurity RBAC system before being
49844+ denied the ability to attempt authorization again for a specified time.
49845+ The lower the number, the harder it will be to brute-force a password.
49846+
49847+config GRKERNSEC_ACL_TIMEOUT
49848+ int "Time to wait after max password tries, in seconds"
49849+ default 30
49850+ help
49851+ This option specifies the time the user must wait after attempting to
49852+ authorize to the RBAC system with the maximum number of invalid
49853+ passwords. The higher the number, the harder it will be to brute-force
49854+ a password.
49855+
49856+endmenu
49857+menu "Filesystem Protections"
49858+depends on GRKERNSEC
49859+
49860+config GRKERNSEC_PROC
49861+ bool "Proc restrictions"
49862+ default y if GRKERNSEC_CONFIG_AUTO
49863+ help
49864+ If you say Y here, the permissions of the /proc filesystem
49865+ will be altered to enhance system security and privacy. You MUST
49866+ choose either a user only restriction or a user and group restriction.
49867+ Depending upon the option you choose, you can either restrict users to
49868+ see only the processes they themselves run, or choose a group that can
49869+ view all processes and files normally restricted to root if you choose
49870+ the "restrict to user only" option. NOTE: If you're running identd or
49871+ ntpd as a non-root user, you will have to run it as the group you
49872+ specify here.
49873+
49874+config GRKERNSEC_PROC_USER
49875+ bool "Restrict /proc to user only"
49876+ depends on GRKERNSEC_PROC
49877+ help
49878+ If you say Y here, non-root users will only be able to view their own
49879+ processes, and restricts them from viewing network-related information,
49880+ and viewing kernel symbol and module information.
49881+
49882+config GRKERNSEC_PROC_USERGROUP
49883+ bool "Allow special group"
49884+ default y if GRKERNSEC_CONFIG_AUTO
49885+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49886+ help
49887+ If you say Y here, you will be able to select a group that will be
49888+ able to view all processes and network-related information. If you've
49889+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49890+ remain hidden. This option is useful if you want to run identd as
49891+ a non-root user.
49892+
49893+config GRKERNSEC_PROC_GID
49894+ int "GID for special group"
49895+ depends on GRKERNSEC_PROC_USERGROUP
49896+ default 1001
49897+
49898+config GRKERNSEC_PROC_ADD
49899+ bool "Additional restrictions"
49900+ default y if GRKERNSEC_CONFIG_AUTO
49901+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49902+ help
49903+ If you say Y here, additional restrictions will be placed on
49904+ /proc that keep normal users from viewing device information and
49905+ slabinfo information that could be useful for exploits.
49906+
49907+config GRKERNSEC_LINK
49908+ bool "Linking restrictions"
49909+ default y if GRKERNSEC_CONFIG_AUTO
49910+ help
49911+ If you say Y here, /tmp race exploits will be prevented, since users
49912+ will no longer be able to follow symlinks owned by other users in
49913+ world-writable +t directories (e.g. /tmp), unless the owner of the
49914+	  symlink is the owner of the directory. Users will also not be
49915+ able to hardlink to files they do not own. If the sysctl option is
49916+ enabled, a sysctl option with name "linking_restrictions" is created.
49917+
49918+config GRKERNSEC_SYMLINKOWN
49919+ bool "Kernel-enforced SymlinksIfOwnerMatch"
49920+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
49921+ help
49922+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
49923+ that prevents it from being used as a security feature. As Apache
49924+ verifies the symlink by performing a stat() against the target of
49925+ the symlink before it is followed, an attacker can setup a symlink
49926+ to point to a same-owned file, then replace the symlink with one
49927+ that targets another user's file just after Apache "validates" the
49928+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
49929+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
49930+ will be in place for the group you specify. If the sysctl option
49931+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
49932+ created.
49933+
49934+config GRKERNSEC_SYMLINKOWN_GID
49935+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
49936+ depends on GRKERNSEC_SYMLINKOWN
49937+ default 1006
49938+ help
49939+ Setting this GID determines what group kernel-enforced
49940+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
49941+ is enabled, a sysctl option with name "symlinkown_gid" is created.
49942+
49943+config GRKERNSEC_FIFO
49944+ bool "FIFO restrictions"
49945+ default y if GRKERNSEC_CONFIG_AUTO
49946+ help
49947+ If you say Y here, users will not be able to write to FIFOs they don't
49948+ own in world-writable +t directories (e.g. /tmp), unless the owner of
49949+ the FIFO is the same owner of the directory it's held in. If the sysctl
49950+ option is enabled, a sysctl option with name "fifo_restrictions" is
49951+ created.
49952+
49953+config GRKERNSEC_SYSFS_RESTRICT
49954+ bool "Sysfs/debugfs restriction"
49955+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49956+ depends on SYSFS
49957+ help
49958+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49959+ any filesystem normally mounted under it (e.g. debugfs) will be
49960+ mostly accessible only by root. These filesystems generally provide access
49961+ to hardware and debug information that isn't appropriate for unprivileged
49962+ users of the system. Sysfs and debugfs have also become a large source
49963+ of new vulnerabilities, ranging from infoleaks to local compromise.
49964+ There has been very little oversight with an eye toward security involved
49965+ in adding new exporters of information to these filesystems, so their
49966+ use is discouraged.
49967+ For reasons of compatibility, a few directories have been whitelisted
49968+ for access by non-root users:
49969+ /sys/fs/selinux
49970+ /sys/fs/fuse
49971+ /sys/devices/system/cpu
49972+
49973+config GRKERNSEC_ROFS
49974+ bool "Runtime read-only mount protection"
49975+ help
49976+ If you say Y here, a sysctl option with name "romount_protect" will
49977+ be created. By setting this option to 1 at runtime, filesystems
49978+ will be protected in the following ways:
49979+ * No new writable mounts will be allowed
49980+ * Existing read-only mounts won't be able to be remounted read/write
49981+ * Write operations will be denied on all block devices
49982+ This option acts independently of grsec_lock: once it is set to 1,
49983+ it cannot be turned off. Therefore, please be mindful of the resulting
49984+ behavior if this option is enabled in an init script on a read-only
49985+ filesystem. This feature is mainly intended for secure embedded systems.
49986+
49987+config GRKERNSEC_CHROOT
49988+ bool "Chroot jail restrictions"
49989+ default y if GRKERNSEC_CONFIG_AUTO
49990+ help
49991+ If you say Y here, you will be able to choose several options that will
49992+ make breaking out of a chrooted jail much more difficult. If you
49993+ encounter no software incompatibilities with the following options, it
49994+ is recommended that you enable each one.
49995+
49996+config GRKERNSEC_CHROOT_MOUNT
49997+ bool "Deny mounts"
49998+ default y if GRKERNSEC_CONFIG_AUTO
49999+ depends on GRKERNSEC_CHROOT
50000+ help
50001+ If you say Y here, processes inside a chroot will not be able to
50002+ mount or remount filesystems. If the sysctl option is enabled, a
50003+ sysctl option with name "chroot_deny_mount" is created.
50004+
50005+config GRKERNSEC_CHROOT_DOUBLE
50006+ bool "Deny double-chroots"
50007+ default y if GRKERNSEC_CONFIG_AUTO
50008+ depends on GRKERNSEC_CHROOT
50009+ help
50010+ If you say Y here, processes inside a chroot will not be able to chroot
50011+ again outside the chroot. This is a widely used method of breaking
50012+ out of a chroot jail and should not be allowed. If the sysctl
50013+ option is enabled, a sysctl option with name
50014+ "chroot_deny_chroot" is created.
50015+
50016+config GRKERNSEC_CHROOT_PIVOT
50017+ bool "Deny pivot_root in chroot"
50018+ default y if GRKERNSEC_CONFIG_AUTO
50019+ depends on GRKERNSEC_CHROOT
50020+ help
50021+ If you say Y here, processes inside a chroot will not be able to use
50022+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50023+ works similar to chroot in that it changes the root filesystem. This
50024+ function could be misused in a chrooted process to attempt to break out
50025+ of the chroot, and therefore should not be allowed. If the sysctl
50026+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50027+ created.
50028+
50029+config GRKERNSEC_CHROOT_CHDIR
50030+ bool "Enforce chdir(\"/\") on all chroots"
50031+ default y if GRKERNSEC_CONFIG_AUTO
50032+ depends on GRKERNSEC_CHROOT
50033+ help
50034+ If you say Y here, the current working directory of all newly-chrooted
50035+	  applications will be set to the root directory of the chroot.
50036+ The man page on chroot(2) states:
50037+ Note that this call does not change the current working
50038+ directory, so that `.' can be outside the tree rooted at
50039+ `/'. In particular, the super-user can escape from a
50040+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50041+
50042+ It is recommended that you say Y here, since it's not known to break
50043+ any software. If the sysctl option is enabled, a sysctl option with
50044+ name "chroot_enforce_chdir" is created.
50045+
50046+config GRKERNSEC_CHROOT_CHMOD
50047+ bool "Deny (f)chmod +s"
50048+ default y if GRKERNSEC_CONFIG_AUTO
50049+ depends on GRKERNSEC_CHROOT
50050+ help
50051+ If you say Y here, processes inside a chroot will not be able to chmod
50052+ or fchmod files to make them have suid or sgid bits. This protects
50053+ against another published method of breaking a chroot. If the sysctl
50054+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50055+ created.
50056+
50057+config GRKERNSEC_CHROOT_FCHDIR
50058+ bool "Deny fchdir out of chroot"
50059+ default y if GRKERNSEC_CONFIG_AUTO
50060+ depends on GRKERNSEC_CHROOT
50061+ help
50062+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50063+ to a file descriptor of the chrooting process that points to a directory
50064+ outside the filesystem will be stopped. If the sysctl option
50065+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50066+
50067+config GRKERNSEC_CHROOT_MKNOD
50068+ bool "Deny mknod"
50069+ default y if GRKERNSEC_CONFIG_AUTO
50070+ depends on GRKERNSEC_CHROOT
50071+ help
50072+ If you say Y here, processes inside a chroot will not be allowed to
50073+ mknod. The problem with using mknod inside a chroot is that it
50074+ would allow an attacker to create a device entry that is the same
50075+ as one on the physical root of your system, which could range from
50076+ anything from the console device to a device for your harddrive (which
50077+ they could then use to wipe the drive or steal data). It is recommended
50078+ that you say Y here, unless you run into software incompatibilities.
50079+ If the sysctl option is enabled, a sysctl option with name
50080+ "chroot_deny_mknod" is created.
50081+
50082+config GRKERNSEC_CHROOT_SHMAT
50083+ bool "Deny shmat() out of chroot"
50084+ default y if GRKERNSEC_CONFIG_AUTO
50085+ depends on GRKERNSEC_CHROOT
50086+ help
50087+ If you say Y here, processes inside a chroot will not be able to attach
50088+ to shared memory segments that were created outside of the chroot jail.
50089+ It is recommended that you say Y here. If the sysctl option is enabled,
50090+ a sysctl option with name "chroot_deny_shmat" is created.
50091+
50092+config GRKERNSEC_CHROOT_UNIX
50093+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50094+ default y if GRKERNSEC_CONFIG_AUTO
50095+ depends on GRKERNSEC_CHROOT
50096+ help
50097+ If you say Y here, processes inside a chroot will not be able to
50098+ connect to abstract (meaning not belonging to a filesystem) Unix
50099+ domain sockets that were bound outside of a chroot. It is recommended
50100+ that you say Y here. If the sysctl option is enabled, a sysctl option
50101+ with name "chroot_deny_unix" is created.
50102+
50103+config GRKERNSEC_CHROOT_FINDTASK
50104+ bool "Protect outside processes"
50105+ default y if GRKERNSEC_CONFIG_AUTO
50106+ depends on GRKERNSEC_CHROOT
50107+ help
50108+ If you say Y here, processes inside a chroot will not be able to
50109+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50110+ getsid, or view any process outside of the chroot. If the sysctl
50111+ option is enabled, a sysctl option with name "chroot_findtask" is
50112+ created.
50113+
50114+config GRKERNSEC_CHROOT_NICE
50115+ bool "Restrict priority changes"
50116+ default y if GRKERNSEC_CONFIG_AUTO
50117+ depends on GRKERNSEC_CHROOT
50118+ help
50119+ If you say Y here, processes inside a chroot will not be able to raise
50120+ the priority of processes in the chroot, or alter the priority of
50121+ processes outside the chroot. This provides more security than simply
50122+ removing CAP_SYS_NICE from the process' capability set. If the
50123+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50124+ is created.
50125+
50126+config GRKERNSEC_CHROOT_SYSCTL
50127+ bool "Deny sysctl writes"
50128+ default y if GRKERNSEC_CONFIG_AUTO
50129+ depends on GRKERNSEC_CHROOT
50130+ help
50131+ If you say Y here, an attacker in a chroot will not be able to
50132+ write to sysctl entries, either by sysctl(2) or through a /proc
50133+ interface. It is strongly recommended that you say Y here. If the
50134+ sysctl option is enabled, a sysctl option with name
50135+ "chroot_deny_sysctl" is created.
50136+
50137+config GRKERNSEC_CHROOT_CAPS
50138+ bool "Capability restrictions"
50139+ default y if GRKERNSEC_CONFIG_AUTO
50140+ depends on GRKERNSEC_CHROOT
50141+ help
50142+ If you say Y here, the capabilities on all processes within a
50143+ chroot jail will be lowered to stop module insertion, raw i/o,
50144+ system and net admin tasks, rebooting the system, modifying immutable
50145+ files, modifying IPC owned by another, and changing the system time.
50146+ This is left an option because it can break some apps. Disable this
50147+ if your chrooted apps are having problems performing those kinds of
50148+ tasks. If the sysctl option is enabled, a sysctl option with
50149+ name "chroot_caps" is created.
50150+
50151+endmenu
50152+menu "Kernel Auditing"
50153+depends on GRKERNSEC
50154+
50155+config GRKERNSEC_AUDIT_GROUP
50156+ bool "Single group for auditing"
50157+ help
50158+ If you say Y here, the exec, chdir, and (un)mount logging features
50159+ will only operate on a group you specify. This option is recommended
50160+ if you only want to watch certain users instead of having a large
50161+ amount of logs from the entire system. If the sysctl option is enabled,
50162+ a sysctl option with name "audit_group" is created.
50163+
50164+config GRKERNSEC_AUDIT_GID
50165+ int "GID for auditing"
50166+ depends on GRKERNSEC_AUDIT_GROUP
50167+ default 1007
50168+
50169+config GRKERNSEC_EXECLOG
50170+ bool "Exec logging"
50171+ help
50172+ If you say Y here, all execve() calls will be logged (since the
50173+ other exec*() calls are frontends to execve(), all execution
50174+ will be logged). Useful for shell-servers that like to keep track
50175+ of their users. If the sysctl option is enabled, a sysctl option with
50176+ name "exec_logging" is created.
50177+ WARNING: This option when enabled will produce a LOT of logs, especially
50178+ on an active system.
50179+
50180+config GRKERNSEC_RESLOG
50181+ bool "Resource logging"
50182+ default y if GRKERNSEC_CONFIG_AUTO
50183+ help
50184+ If you say Y here, all attempts to overstep resource limits will
50185+ be logged with the resource name, the requested size, and the current
50186+ limit. It is highly recommended that you say Y here. If the sysctl
50187+ option is enabled, a sysctl option with name "resource_logging" is
50188+ created. If the RBAC system is enabled, the sysctl value is ignored.
50189+
50190+config GRKERNSEC_CHROOT_EXECLOG
50191+ bool "Log execs within chroot"
50192+ help
50193+ If you say Y here, all executions inside a chroot jail will be logged
50194+ to syslog. This can cause a large amount of logs if certain
50195+ applications (eg. djb's daemontools) are installed on the system, and
50196+ is therefore left as an option. If the sysctl option is enabled, a
50197+ sysctl option with name "chroot_execlog" is created.
50198+
50199+config GRKERNSEC_AUDIT_PTRACE
50200+ bool "Ptrace logging"
50201+ help
50202+ If you say Y here, all attempts to attach to a process via ptrace
50203+ will be logged. If the sysctl option is enabled, a sysctl option
50204+ with name "audit_ptrace" is created.
50205+
50206+config GRKERNSEC_AUDIT_CHDIR
50207+ bool "Chdir logging"
50208+ help
50209+ If you say Y here, all chdir() calls will be logged. If the sysctl
50210+ option is enabled, a sysctl option with name "audit_chdir" is created.
50211+
50212+config GRKERNSEC_AUDIT_MOUNT
50213+ bool "(Un)Mount logging"
50214+ help
50215+ If you say Y here, all mounts and unmounts will be logged. If the
50216+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50217+ created.
50218+
50219+config GRKERNSEC_SIGNAL
50220+ bool "Signal logging"
50221+ default y if GRKERNSEC_CONFIG_AUTO
50222+ help
50223+ If you say Y here, certain important signals will be logged, such as
50224+ SIGSEGV, which will as a result inform you of when an error in a program
50225+ occurred, which in some cases could mean a possible exploit attempt.
50226+ If the sysctl option is enabled, a sysctl option with name
50227+ "signal_logging" is created.
50228+
50229+config GRKERNSEC_FORKFAIL
50230+ bool "Fork failure logging"
50231+ help
50232+ If you say Y here, all failed fork() attempts will be logged.
50233+ This could suggest a fork bomb, or someone attempting to overstep
50234+ their process limit. If the sysctl option is enabled, a sysctl option
50235+ with name "forkfail_logging" is created.
50236+
50237+config GRKERNSEC_TIME
50238+ bool "Time change logging"
50239+ default y if GRKERNSEC_CONFIG_AUTO
50240+ help
50241+ If you say Y here, any changes of the system clock will be logged.
50242+ If the sysctl option is enabled, a sysctl option with name
50243+ "timechange_logging" is created.
50244+
50245+config GRKERNSEC_PROC_IPADDR
50246+ bool "/proc/<pid>/ipaddr support"
50247+ default y if GRKERNSEC_CONFIG_AUTO
50248+ help
50249+ If you say Y here, a new entry will be added to each /proc/<pid>
50250+ directory that contains the IP address of the person using the task.
50251+ The IP is carried across local TCP and AF_UNIX stream sockets.
50252+ This information can be useful for IDS/IPSes to perform remote response
50253+ to a local attack. The entry is readable by only the owner of the
50254+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50255+ the RBAC system), and thus does not create privacy concerns.
50256+
50257+config GRKERNSEC_RWXMAP_LOG
50258+ bool 'Denied RWX mmap/mprotect logging'
50259+ default y if GRKERNSEC_CONFIG_AUTO
50260+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50261+ help
50262+ If you say Y here, calls to mmap() and mprotect() with explicit
50263+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50264+ denied by the PAX_MPROTECT feature. If the sysctl option is
50265+ enabled, a sysctl option with name "rwxmap_logging" is created.
50266+
50267+config GRKERNSEC_AUDIT_TEXTREL
50268+ bool 'ELF text relocations logging (READ HELP)'
50269+ depends on PAX_MPROTECT
50270+ help
50271+ If you say Y here, text relocations will be logged with the filename
50272+ of the offending library or binary. The purpose of the feature is
50273+ to help Linux distribution developers get rid of libraries and
50274+ binaries that need text relocations which hinder the future progress
50275+ of PaX. Only Linux distribution developers should say Y here, and
50276+ never on a production machine, as this option creates an information
50277+ leak that could aid an attacker in defeating the randomization of
50278+ a single memory region. If the sysctl option is enabled, a sysctl
50279+ option with name "audit_textrel" is created.
50280+
50281+endmenu
50282+
50283+menu "Executable Protections"
50284+depends on GRKERNSEC
50285+
50286+config GRKERNSEC_DMESG
50287+ bool "Dmesg(8) restriction"
50288+ default y if GRKERNSEC_CONFIG_AUTO
50289+ help
50290+ If you say Y here, non-root users will not be able to use dmesg(8)
50291+ to view up to the last 4kb of messages in the kernel's log buffer.
50292+ The kernel's log buffer often contains kernel addresses and other
50293+ identifying information useful to an attacker in fingerprinting a
50294+ system for a targeted exploit.
50295+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50296+ created.
50297+
50298+config GRKERNSEC_HARDEN_PTRACE
50299+ bool "Deter ptrace-based process snooping"
50300+ default y if GRKERNSEC_CONFIG_AUTO
50301+ help
50302+ If you say Y here, TTY sniffers and other malicious monitoring
50303+ programs implemented through ptrace will be defeated. If you
50304+ have been using the RBAC system, this option has already been
50305+ enabled for several years for all users, with the ability to make
50306+ fine-grained exceptions.
50307+
50308+ This option only affects the ability of non-root users to ptrace
50309+ processes that are not a descendent of the ptracing process.
50310+ This means that strace ./binary and gdb ./binary will still work,
50311+ but attaching to arbitrary processes will not. If the sysctl
50312+ option is enabled, a sysctl option with name "harden_ptrace" is
50313+ created.
50314+
50315+config GRKERNSEC_PTRACE_READEXEC
50316+ bool "Require read access to ptrace sensitive binaries"
50317+ default y if GRKERNSEC_CONFIG_AUTO
50318+ help
50319+ If you say Y here, unprivileged users will not be able to ptrace unreadable
50320+ binaries. This option is useful in environments that
50321+ remove the read bits (e.g. file mode 4711) from suid binaries to
50322+ prevent infoleaking of their contents. This option adds
50323+ consistency to the use of that file mode, as the binary could normally
50324+ be read out when run without privileges while ptracing.
50325+
50326+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50327+ is created.
50328+
50329+config GRKERNSEC_SETXID
50330+ bool "Enforce consistent multithreaded privileges"
50331+ default y if GRKERNSEC_CONFIG_AUTO
50332+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
50333+ help
50334+ If you say Y here, a change from a root uid to a non-root uid
50335+ in a multithreaded application will cause the resulting uids,
50336+ gids, supplementary groups, and capabilities in that thread
50337+ to be propagated to the other threads of the process. In most
50338+ cases this is unnecessary, as glibc will emulate this behavior
50339+ on behalf of the application. Other libcs do not act in the
50340+ same way, allowing the other threads of the process to continue
50341+ running with root privileges. If the sysctl option is enabled,
50342+ a sysctl option with name "consistent_setxid" is created.
50343+
50344+config GRKERNSEC_TPE
50345+ bool "Trusted Path Execution (TPE)"
50346+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50347+ help
50348+ If you say Y here, you will be able to choose a gid to add to the
50349+ supplementary groups of users you want to mark as "untrusted."
50350+ These users will not be able to execute any files that are not in
50351+ root-owned directories writable only by root. If the sysctl option
50352+ is enabled, a sysctl option with name "tpe" is created.
50353+
50354+config GRKERNSEC_TPE_ALL
50355+ bool "Partially restrict all non-root users"
50356+ depends on GRKERNSEC_TPE
50357+ help
50358+ If you say Y here, all non-root users will be covered under
50359+ a weaker TPE restriction. This is separate from, and in addition to,
50360+ the main TPE options that you have selected elsewhere. Thus, if a
50361+ "trusted" GID is chosen, this restriction applies to even that GID.
50362+ Under this restriction, all non-root users will only be allowed to
50363+ execute files in directories they own that are not group or
50364+ world-writable, or in directories owned by root and writable only by
50365+ root. If the sysctl option is enabled, a sysctl option with name
50366+ "tpe_restrict_all" is created.
50367+
50368+config GRKERNSEC_TPE_INVERT
50369+ bool "Invert GID option"
50370+ depends on GRKERNSEC_TPE
50371+ help
50372+ If you say Y here, the group you specify in the TPE configuration will
50373+ decide what group TPE restrictions will be *disabled* for. This
50374+ option is useful if you want TPE restrictions to be applied to most
50375+ users on the system. If the sysctl option is enabled, a sysctl option
50376+ with name "tpe_invert" is created. Unlike other sysctl options, this
50377+ entry will default to on for backward-compatibility.
50378+
50379+config GRKERNSEC_TPE_GID
50380+ int "GID for untrusted users"
50381+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50382+ default 1005
50383+ help
50384+ Setting this GID determines what group TPE restrictions will be
50385+ *enabled* for. If the sysctl option is enabled, a sysctl option
50386+ with name "tpe_gid" is created.
50387+
50388+config GRKERNSEC_TPE_GID
50389+ int "GID for trusted users"
50390+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50391+ default 1005
50392+ help
50393+ Setting this GID determines what group TPE restrictions will be
50394+ *disabled* for. If the sysctl option is enabled, a sysctl option
50395+ with name "tpe_gid" is created.
50396+
50397+endmenu
50398+menu "Network Protections"
50399+depends on GRKERNSEC
50400+
50401+config GRKERNSEC_RANDNET
50402+ bool "Larger entropy pools"
50403+ default y if GRKERNSEC_CONFIG_AUTO
50404+ help
50405+ If you say Y here, the entropy pools used for many features of Linux
50406+ and grsecurity will be doubled in size. Since several grsecurity
50407+ features use additional randomness, it is recommended that you say Y
50408+ here. Saying Y here has a similar effect as modifying
50409+ /proc/sys/kernel/random/poolsize.
50410+
50411+config GRKERNSEC_BLACKHOLE
50412+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50413+ default y if GRKERNSEC_CONFIG_AUTO
50414+ depends on NET
50415+ help
50416+ If you say Y here, neither TCP resets nor ICMP
50417+ destination-unreachable packets will be sent in response to packets
50418+ sent to ports for which no associated listening process exists.
50419+ This feature supports both IPV4 and IPV6 and exempts the
50420+ loopback interface from blackholing. Enabling this feature
50421+ makes a host more resilient to DoS attacks and reduces network
50422+ visibility against scanners.
50423+
50424+ The blackhole feature as-implemented is equivalent to the FreeBSD
50425+ blackhole feature, as it prevents RST responses to all packets, not
50426+ just SYNs. Under most application behavior this causes no
50427+ problems, but applications (like haproxy) may not close certain
50428+ connections in a way that cleanly terminates them on the remote
50429+ end, leaving the remote host in LAST_ACK state. Because of this
50430+ side-effect and to prevent intentional LAST_ACK DoSes, this
50431+ feature also adds automatic mitigation against such attacks.
50432+ The mitigation drastically reduces the amount of time a socket
50433+ can spend in LAST_ACK state. If you're using haproxy and not
50434+ all servers it connects to have this option enabled, consider
50435+ disabling this feature on the haproxy host.
50436+
50437+ If the sysctl option is enabled, two sysctl options with names
50438+ "ip_blackhole" and "lastack_retries" will be created.
50439+ While "ip_blackhole" takes the standard zero/non-zero on/off
50440+ toggle, "lastack_retries" uses the same kinds of values as
50441+ "tcp_retries1" and "tcp_retries2". The default value of 4
50442+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50443+ state.
50444+
50445+config GRKERNSEC_SOCKET
50446+ bool "Socket restrictions"
50447+ depends on NET
50448+ help
50449+ If you say Y here, you will be able to choose from several options.
50450+ If you assign a GID on your system and add it to the supplementary
50451+ groups of users you want to restrict socket access to, this patch
50452+ will perform up to three things, based on the option(s) you choose.
50453+
50454+config GRKERNSEC_SOCKET_ALL
50455+ bool "Deny any sockets to group"
50456+ depends on GRKERNSEC_SOCKET
50457+ help
50458+ If you say Y here, you will be able to choose a GID whose users will
50459+ be unable to connect to other hosts from your machine or run server
50460+ applications from your machine. If the sysctl option is enabled, a
50461+ sysctl option with name "socket_all" is created.
50462+
50463+config GRKERNSEC_SOCKET_ALL_GID
50464+ int "GID to deny all sockets for"
50465+ depends on GRKERNSEC_SOCKET_ALL
50466+ default 1004
50467+ help
50468+ Here you can choose the GID to disable socket access for. Remember to
50469+ add the users you want socket access disabled for to the GID
50470+ specified here. If the sysctl option is enabled, a sysctl option
50471+ with name "socket_all_gid" is created.
50472+
50473+config GRKERNSEC_SOCKET_CLIENT
50474+ bool "Deny client sockets to group"
50475+ depends on GRKERNSEC_SOCKET
50476+ help
50477+ If you say Y here, you will be able to choose a GID whose users will
50478+ be unable to connect to other hosts from your machine, but will be
50479+ able to run servers. If this option is enabled, all users in the group
50480+ you specify will have to use passive mode when initiating ftp transfers
50481+ from the shell on your machine. If the sysctl option is enabled, a
50482+ sysctl option with name "socket_client" is created.
50483+
50484+config GRKERNSEC_SOCKET_CLIENT_GID
50485+ int "GID to deny client sockets for"
50486+ depends on GRKERNSEC_SOCKET_CLIENT
50487+ default 1003
50488+ help
50489+ Here you can choose the GID to disable client socket access for.
50490+ Remember to add the users you want client socket access disabled for to
50491+ the GID specified here. If the sysctl option is enabled, a sysctl
50492+ option with name "socket_client_gid" is created.
50493+
50494+config GRKERNSEC_SOCKET_SERVER
50495+ bool "Deny server sockets to group"
50496+ depends on GRKERNSEC_SOCKET
50497+ help
50498+ If you say Y here, you will be able to choose a GID whose users will
50499+ be unable to run server applications from your machine. If the sysctl
50500+ option is enabled, a sysctl option with name "socket_server" is created.
50501+
50502+config GRKERNSEC_SOCKET_SERVER_GID
50503+ int "GID to deny server sockets for"
50504+ depends on GRKERNSEC_SOCKET_SERVER
50505+ default 1002
50506+ help
50507+ Here you can choose the GID to disable server socket access for.
50508+ Remember to add the users you want server socket access disabled for to
50509+ the GID specified here. If the sysctl option is enabled, a sysctl
50510+ option with name "socket_server_gid" is created.
50511+
50512+endmenu
50513+menu "Sysctl Support"
50514+depends on GRKERNSEC && SYSCTL
50515+
50516+config GRKERNSEC_SYSCTL
50517+ bool "Sysctl support"
50518+ default y if GRKERNSEC_CONFIG_AUTO
50519+ help
50520+ If you say Y here, you will be able to change the options that
50521+ grsecurity runs with at bootup, without having to recompile your
50522+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50523+ to enable (1) or disable (0) various features. All the sysctl entries
50524+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50525+ All features enabled in the kernel configuration are disabled at boot
50526+ if you do not say Y to the "Turn on features by default" option.
50527+ All options should be set at startup, and the grsec_lock entry should
50528+ be set to a non-zero value after all the options are set.
50529+ *THIS IS EXTREMELY IMPORTANT*
50530+
50531+config GRKERNSEC_SYSCTL_DISTRO
50532+ bool "Extra sysctl support for distro makers (READ HELP)"
50533+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50534+ help
50535+ If you say Y here, additional sysctl options will be created
50536+ for features that affect processes running as root. Therefore,
50537+ it is critical when using this option that the grsec_lock entry be
50538+ enabled after boot. Only distros with prebuilt kernel packages
50539+ with this option enabled that can ensure grsec_lock is enabled
50540+ after boot should use this option.
50541+ *Failure to set grsec_lock after boot makes all grsec features
50542+ this option covers useless*
50543+
50544+ Currently this option creates the following sysctl entries:
50545+ "Disable Privileged I/O": "disable_priv_io"
50546+
50547+config GRKERNSEC_SYSCTL_ON
50548+ bool "Turn on features by default"
50549+ default y if GRKERNSEC_CONFIG_AUTO
50550+ depends on GRKERNSEC_SYSCTL
50551+ help
50552+ If you say Y here, instead of having all features enabled in the
50553+ kernel configuration disabled at boot time, the features will be
50554+ enabled at boot time. It is recommended you say Y here unless
50555+ there is some reason you would want all sysctl-tunable features to
50556+ be disabled by default. As mentioned elsewhere, it is important
50557+ to enable the grsec_lock entry once you have finished modifying
50558+ the sysctl entries.
50559+
50560+endmenu
50561+menu "Logging Options"
50562+depends on GRKERNSEC
50563+
50564+config GRKERNSEC_FLOODTIME
50565+ int "Seconds in between log messages (minimum)"
50566+ default 10
50567+ help
50568+ This option allows you to enforce the number of seconds between
50569+ grsecurity log messages. The default should be suitable for most
50570+ people, however, if you choose to change it, choose a value small enough
50571+ to allow informative logs to be produced, but large enough to
50572+ prevent flooding.
50573+
50574+config GRKERNSEC_FLOODBURST
50575+ int "Number of messages in a burst (maximum)"
50576+ default 6
50577+ help
50578+ This option allows you to choose the maximum number of messages allowed
50579+ within the flood time interval you chose in a separate option. The
50580+ default should be suitable for most people, however if you find that
50581+ many of your logs are being interpreted as flooding, you may want to
50582+ raise this value.
50583+
50584+endmenu
50585diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50586new file mode 100644
50587index 0000000..1b9afa9
50588--- /dev/null
50589+++ b/grsecurity/Makefile
50590@@ -0,0 +1,38 @@
50591+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50592+# during 2001-2009 it has been completely redesigned by Brad Spengler
50593+# into an RBAC system
50594+#
50595+# All code in this directory and various hooks inserted throughout the kernel
50596+# are copyright Brad Spengler - Open Source Security, Inc., and released
50597+# under the GPL v2 or higher
50598+
50599+KBUILD_CFLAGS += -Werror
50600+
50601+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50602+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50603+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50604+
50605+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50606+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50607+ gracl_learn.o grsec_log.o
50608+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50609+
50610+ifdef CONFIG_NET
50611+obj-y += grsec_sock.o
50612+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50613+endif
50614+
50615+ifndef CONFIG_GRKERNSEC
50616+obj-y += grsec_disabled.o
50617+endif
50618+
50619+ifdef CONFIG_GRKERNSEC_HIDESYM
50620+extra-y := grsec_hidesym.o
50621+$(obj)/grsec_hidesym.o:
50622+ @-chmod -f 500 /boot
50623+ @-chmod -f 500 /lib/modules
50624+ @-chmod -f 500 /lib64/modules
50625+ @-chmod -f 500 /lib32/modules
50626+ @-chmod -f 700 .
50627+ @echo ' grsec: protected kernel image paths'
50628+endif
50629diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50630new file mode 100644
50631index 0000000..7a5922f
50632--- /dev/null
50633+++ b/grsecurity/gracl.c
50634@@ -0,0 +1,4016 @@
50635+#include <linux/kernel.h>
50636+#include <linux/module.h>
50637+#include <linux/sched.h>
50638+#include <linux/mm.h>
50639+#include <linux/file.h>
50640+#include <linux/fs.h>
50641+#include <linux/namei.h>
50642+#include <linux/mount.h>
50643+#include <linux/tty.h>
50644+#include <linux/proc_fs.h>
50645+#include <linux/lglock.h>
50646+#include <linux/slab.h>
50647+#include <linux/vmalloc.h>
50648+#include <linux/types.h>
50649+#include <linux/sysctl.h>
50650+#include <linux/netdevice.h>
50651+#include <linux/ptrace.h>
50652+#include <linux/gracl.h>
50653+#include <linux/gralloc.h>
50654+#include <linux/security.h>
50655+#include <linux/grinternal.h>
50656+#include <linux/pid_namespace.h>
50657+#include <linux/stop_machine.h>
50658+#include <linux/fdtable.h>
50659+#include <linux/percpu.h>
50660+#include "../fs/mount.h"
50661+
50662+#include <asm/uaccess.h>
50663+#include <asm/errno.h>
50664+#include <asm/mman.h>
50665+
50666+static struct acl_role_db acl_role_set;
50667+static struct name_db name_set;
50668+static struct inodev_db inodev_set;
50669+
50670+/* for keeping track of userspace pointers used for subjects, so we
50671+ can share references in the kernel as well
50672+*/
50673+
50674+static struct path real_root;
50675+
50676+static struct acl_subj_map_db subj_map_set;
50677+
50678+static struct acl_role_label *default_role;
50679+
50680+static struct acl_role_label *role_list;
50681+
50682+static u16 acl_sp_role_value;
50683+
50684+extern char *gr_shared_page[4];
50685+static DEFINE_MUTEX(gr_dev_mutex);
50686+DEFINE_RWLOCK(gr_inode_lock);
50687+
50688+struct gr_arg *gr_usermode;
50689+
50690+static unsigned int gr_status __read_only = GR_STATUS_INIT;
50691+
50692+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50693+extern void gr_clear_learn_entries(void);
50694+
50695+#ifdef CONFIG_GRKERNSEC_RESLOG
50696+extern void gr_log_resource(const struct task_struct *task,
50697+ const int res, const unsigned long wanted, const int gt);
50698+#endif
50699+
50700+unsigned char *gr_system_salt;
50701+unsigned char *gr_system_sum;
50702+
50703+static struct sprole_pw **acl_special_roles = NULL;
50704+static __u16 num_sprole_pws = 0;
50705+
50706+static struct acl_role_label *kernel_role = NULL;
50707+
50708+static unsigned int gr_auth_attempts = 0;
50709+static unsigned long gr_auth_expires = 0UL;
50710+
50711+#ifdef CONFIG_NET
50712+extern struct vfsmount *sock_mnt;
50713+#endif
50714+
50715+extern struct vfsmount *pipe_mnt;
50716+extern struct vfsmount *shm_mnt;
50717+#ifdef CONFIG_HUGETLBFS
50718+extern struct vfsmount *hugetlbfs_vfsmount;
50719+#endif
50720+
50721+static struct acl_object_label *fakefs_obj_rw;
50722+static struct acl_object_label *fakefs_obj_rwx;
50723+
50724+extern int gr_init_uidset(void);
50725+extern void gr_free_uidset(void);
50726+extern void gr_remove_uid(uid_t uid);
50727+extern int gr_find_uid(uid_t uid);
50728+
50729+DECLARE_BRLOCK(vfsmount_lock);
50730+
50731+__inline__ int
50732+gr_acl_is_enabled(void)
50733+{
50734+ return (gr_status & GR_READY);
50735+}
50736+
50737+#ifdef CONFIG_BTRFS_FS
50738+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50739+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50740+#endif
50741+
50742+static inline dev_t __get_dev(const struct dentry *dentry)
50743+{
50744+#ifdef CONFIG_BTRFS_FS
50745+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50746+ return get_btrfs_dev_from_inode(dentry->d_inode);
50747+ else
50748+#endif
50749+ return dentry->d_inode->i_sb->s_dev;
50750+}
50751+
50752+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50753+{
50754+ return __get_dev(dentry);
50755+}
50756+
50757+static char gr_task_roletype_to_char(struct task_struct *task)
50758+{
50759+ switch (task->role->roletype &
50760+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50761+ GR_ROLE_SPECIAL)) {
50762+ case GR_ROLE_DEFAULT:
50763+ return 'D';
50764+ case GR_ROLE_USER:
50765+ return 'U';
50766+ case GR_ROLE_GROUP:
50767+ return 'G';
50768+ case GR_ROLE_SPECIAL:
50769+ return 'S';
50770+ }
50771+
50772+ return 'X';
50773+}
50774+
50775+char gr_roletype_to_char(void)
50776+{
50777+ return gr_task_roletype_to_char(current);
50778+}
50779+
50780+__inline__ int
50781+gr_acl_tpe_check(void)
50782+{
50783+ if (unlikely(!(gr_status & GR_READY)))
50784+ return 0;
50785+ if (current->role->roletype & GR_ROLE_TPE)
50786+ return 1;
50787+ else
50788+ return 0;
50789+}
50790+
50791+int
50792+gr_handle_rawio(const struct inode *inode)
50793+{
50794+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50795+ if (inode && S_ISBLK(inode->i_mode) &&
50796+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50797+ !capable(CAP_SYS_RAWIO))
50798+ return 1;
50799+#endif
50800+ return 0;
50801+}
50802+
50803+static int
50804+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50805+{
50806+ if (likely(lena != lenb))
50807+ return 0;
50808+
50809+ return !memcmp(a, b, lena);
50810+}
50811+
50812+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50813+{
50814+ *buflen -= namelen;
50815+ if (*buflen < 0)
50816+ return -ENAMETOOLONG;
50817+ *buffer -= namelen;
50818+ memcpy(*buffer, str, namelen);
50819+ return 0;
50820+}
50821+
50822+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50823+{
50824+ return prepend(buffer, buflen, name->name, name->len);
50825+}
50826+
50827+static int prepend_path(const struct path *path, struct path *root,
50828+ char **buffer, int *buflen)
50829+{
50830+ struct dentry *dentry = path->dentry;
50831+ struct vfsmount *vfsmnt = path->mnt;
50832+ struct mount *mnt = real_mount(vfsmnt);
50833+ bool slash = false;
50834+ int error = 0;
50835+
50836+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50837+ struct dentry * parent;
50838+
50839+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50840+ /* Global root? */
50841+ if (!mnt_has_parent(mnt)) {
50842+ goto out;
50843+ }
50844+ dentry = mnt->mnt_mountpoint;
50845+ mnt = mnt->mnt_parent;
50846+ vfsmnt = &mnt->mnt;
50847+ continue;
50848+ }
50849+ parent = dentry->d_parent;
50850+ prefetch(parent);
50851+ spin_lock(&dentry->d_lock);
50852+ error = prepend_name(buffer, buflen, &dentry->d_name);
50853+ spin_unlock(&dentry->d_lock);
50854+ if (!error)
50855+ error = prepend(buffer, buflen, "/", 1);
50856+ if (error)
50857+ break;
50858+
50859+ slash = true;
50860+ dentry = parent;
50861+ }
50862+
50863+out:
50864+ if (!error && !slash)
50865+ error = prepend(buffer, buflen, "/", 1);
50866+
50867+ return error;
50868+}
50869+
50870+/* this must be called with vfsmount_lock and rename_lock held */
50871+
50872+static char *__our_d_path(const struct path *path, struct path *root,
50873+ char *buf, int buflen)
50874+{
50875+ char *res = buf + buflen;
50876+ int error;
50877+
50878+ prepend(&res, &buflen, "\0", 1);
50879+ error = prepend_path(path, root, &res, &buflen);
50880+ if (error)
50881+ return ERR_PTR(error);
50882+
50883+ return res;
50884+}
50885+
50886+static char *
50887+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50888+{
50889+ char *retval;
50890+
50891+ retval = __our_d_path(path, root, buf, buflen);
50892+ if (unlikely(IS_ERR(retval)))
50893+ retval = strcpy(buf, "<path too long>");
50894+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50895+ retval[1] = '\0';
50896+
50897+ return retval;
50898+}
50899+
50900+static char *
50901+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50902+ char *buf, int buflen)
50903+{
50904+ struct path path;
50905+ char *res;
50906+
50907+ path.dentry = (struct dentry *)dentry;
50908+ path.mnt = (struct vfsmount *)vfsmnt;
50909+
50910+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50911+ by the RBAC system */
50912+ res = gen_full_path(&path, &real_root, buf, buflen);
50913+
50914+ return res;
50915+}
50916+
50917+static char *
50918+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50919+ char *buf, int buflen)
50920+{
50921+ char *res;
50922+ struct path path;
50923+ struct path root;
50924+ struct task_struct *reaper = init_pid_ns.child_reaper;
50925+
50926+ path.dentry = (struct dentry *)dentry;
50927+ path.mnt = (struct vfsmount *)vfsmnt;
50928+
50929+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50930+ get_fs_root(reaper->fs, &root);
50931+
50932+ write_seqlock(&rename_lock);
50933+ br_read_lock(vfsmount_lock);
50934+ res = gen_full_path(&path, &root, buf, buflen);
50935+ br_read_unlock(vfsmount_lock);
50936+ write_sequnlock(&rename_lock);
50937+
50938+ path_put(&root);
50939+ return res;
50940+}
50941+
50942+static char *
50943+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50944+{
50945+ char *ret;
50946+ write_seqlock(&rename_lock);
50947+ br_read_lock(vfsmount_lock);
50948+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50949+ PAGE_SIZE);
50950+ br_read_unlock(vfsmount_lock);
50951+ write_sequnlock(&rename_lock);
50952+ return ret;
50953+}
50954+
50955+static char *
50956+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50957+{
50958+ char *ret;
50959+ char *buf;
50960+ int buflen;
50961+
50962+ write_seqlock(&rename_lock);
50963+ br_read_lock(vfsmount_lock);
50964+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50965+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50966+ buflen = (int)(ret - buf);
50967+ if (buflen >= 5)
50968+ prepend(&ret, &buflen, "/proc", 5);
50969+ else
50970+ ret = strcpy(buf, "<path too long>");
50971+ br_read_unlock(vfsmount_lock);
50972+ write_sequnlock(&rename_lock);
50973+ return ret;
50974+}
50975+
50976+char *
50977+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50978+{
50979+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50980+ PAGE_SIZE);
50981+}
50982+
50983+char *
50984+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50985+{
50986+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50987+ PAGE_SIZE);
50988+}
50989+
50990+char *
50991+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50992+{
50993+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50994+ PAGE_SIZE);
50995+}
50996+
50997+char *
50998+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50999+{
51000+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51001+ PAGE_SIZE);
51002+}
51003+
51004+char *
51005+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51006+{
51007+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51008+ PAGE_SIZE);
51009+}
51010+
51011+__inline__ __u32
51012+to_gr_audit(const __u32 reqmode)
51013+{
51014+ /* masks off auditable permission flags, then shifts them to create
51015+ auditing flags, and adds the special case of append auditing if
51016+ we're requesting write */
51017+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51018+}
51019+
51020+struct acl_subject_label *
51021+lookup_subject_map(const struct acl_subject_label *userp)
51022+{
51023+ unsigned int index = shash(userp, subj_map_set.s_size);
51024+ struct subject_map *match;
51025+
51026+ match = subj_map_set.s_hash[index];
51027+
51028+ while (match && match->user != userp)
51029+ match = match->next;
51030+
51031+ if (match != NULL)
51032+ return match->kernel;
51033+ else
51034+ return NULL;
51035+}
51036+
51037+static void
51038+insert_subj_map_entry(struct subject_map *subjmap)
51039+{
51040+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51041+ struct subject_map **curr;
51042+
51043+ subjmap->prev = NULL;
51044+
51045+ curr = &subj_map_set.s_hash[index];
51046+ if (*curr != NULL)
51047+ (*curr)->prev = subjmap;
51048+
51049+ subjmap->next = *curr;
51050+ *curr = subjmap;
51051+
51052+ return;
51053+}
51054+
51055+static struct acl_role_label *
51056+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51057+ const gid_t gid)
51058+{
51059+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51060+ struct acl_role_label *match;
51061+ struct role_allowed_ip *ipp;
51062+ unsigned int x;
51063+ u32 curr_ip = task->signal->curr_ip;
51064+
51065+ task->signal->saved_ip = curr_ip;
51066+
51067+ match = acl_role_set.r_hash[index];
51068+
51069+ while (match) {
51070+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51071+ for (x = 0; x < match->domain_child_num; x++) {
51072+ if (match->domain_children[x] == uid)
51073+ goto found;
51074+ }
51075+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51076+ break;
51077+ match = match->next;
51078+ }
51079+found:
51080+ if (match == NULL) {
51081+ try_group:
51082+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51083+ match = acl_role_set.r_hash[index];
51084+
51085+ while (match) {
51086+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51087+ for (x = 0; x < match->domain_child_num; x++) {
51088+ if (match->domain_children[x] == gid)
51089+ goto found2;
51090+ }
51091+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51092+ break;
51093+ match = match->next;
51094+ }
51095+found2:
51096+ if (match == NULL)
51097+ match = default_role;
51098+ if (match->allowed_ips == NULL)
51099+ return match;
51100+ else {
51101+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51102+ if (likely
51103+ ((ntohl(curr_ip) & ipp->netmask) ==
51104+ (ntohl(ipp->addr) & ipp->netmask)))
51105+ return match;
51106+ }
51107+ match = default_role;
51108+ }
51109+ } else if (match->allowed_ips == NULL) {
51110+ return match;
51111+ } else {
51112+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51113+ if (likely
51114+ ((ntohl(curr_ip) & ipp->netmask) ==
51115+ (ntohl(ipp->addr) & ipp->netmask)))
51116+ return match;
51117+ }
51118+ goto try_group;
51119+ }
51120+
51121+ return match;
51122+}
51123+
51124+struct acl_subject_label *
51125+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51126+ const struct acl_role_label *role)
51127+{
51128+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51129+ struct acl_subject_label *match;
51130+
51131+ match = role->subj_hash[index];
51132+
51133+ while (match && (match->inode != ino || match->device != dev ||
51134+ (match->mode & GR_DELETED))) {
51135+ match = match->next;
51136+ }
51137+
51138+ if (match && !(match->mode & GR_DELETED))
51139+ return match;
51140+ else
51141+ return NULL;
51142+}
51143+
51144+struct acl_subject_label *
51145+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51146+ const struct acl_role_label *role)
51147+{
51148+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51149+ struct acl_subject_label *match;
51150+
51151+ match = role->subj_hash[index];
51152+
51153+ while (match && (match->inode != ino || match->device != dev ||
51154+ !(match->mode & GR_DELETED))) {
51155+ match = match->next;
51156+ }
51157+
51158+ if (match && (match->mode & GR_DELETED))
51159+ return match;
51160+ else
51161+ return NULL;
51162+}
51163+
51164+static struct acl_object_label *
51165+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51166+ const struct acl_subject_label *subj)
51167+{
51168+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51169+ struct acl_object_label *match;
51170+
51171+ match = subj->obj_hash[index];
51172+
51173+ while (match && (match->inode != ino || match->device != dev ||
51174+ (match->mode & GR_DELETED))) {
51175+ match = match->next;
51176+ }
51177+
51178+ if (match && !(match->mode & GR_DELETED))
51179+ return match;
51180+ else
51181+ return NULL;
51182+}
51183+
51184+static struct acl_object_label *
51185+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51186+ const struct acl_subject_label *subj)
51187+{
51188+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51189+ struct acl_object_label *match;
51190+
51191+ match = subj->obj_hash[index];
51192+
51193+ while (match && (match->inode != ino || match->device != dev ||
51194+ !(match->mode & GR_DELETED))) {
51195+ match = match->next;
51196+ }
51197+
51198+ if (match && (match->mode & GR_DELETED))
51199+ return match;
51200+
51201+ match = subj->obj_hash[index];
51202+
51203+ while (match && (match->inode != ino || match->device != dev ||
51204+ (match->mode & GR_DELETED))) {
51205+ match = match->next;
51206+ }
51207+
51208+ if (match && !(match->mode & GR_DELETED))
51209+ return match;
51210+ else
51211+ return NULL;
51212+}
51213+
51214+static struct name_entry *
51215+lookup_name_entry(const char *name)
51216+{
51217+ unsigned int len = strlen(name);
51218+ unsigned int key = full_name_hash(name, len);
51219+ unsigned int index = key % name_set.n_size;
51220+ struct name_entry *match;
51221+
51222+ match = name_set.n_hash[index];
51223+
51224+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51225+ match = match->next;
51226+
51227+ return match;
51228+}
51229+
51230+static struct name_entry *
51231+lookup_name_entry_create(const char *name)
51232+{
51233+ unsigned int len = strlen(name);
51234+ unsigned int key = full_name_hash(name, len);
51235+ unsigned int index = key % name_set.n_size;
51236+ struct name_entry *match;
51237+
51238+ match = name_set.n_hash[index];
51239+
51240+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51241+ !match->deleted))
51242+ match = match->next;
51243+
51244+ if (match && match->deleted)
51245+ return match;
51246+
51247+ match = name_set.n_hash[index];
51248+
51249+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51250+ match->deleted))
51251+ match = match->next;
51252+
51253+ if (match && !match->deleted)
51254+ return match;
51255+ else
51256+ return NULL;
51257+}
51258+
51259+static struct inodev_entry *
51260+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51261+{
51262+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51263+ struct inodev_entry *match;
51264+
51265+ match = inodev_set.i_hash[index];
51266+
51267+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51268+ match = match->next;
51269+
51270+ return match;
51271+}
51272+
51273+static void
51274+insert_inodev_entry(struct inodev_entry *entry)
51275+{
51276+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51277+ inodev_set.i_size);
51278+ struct inodev_entry **curr;
51279+
51280+ entry->prev = NULL;
51281+
51282+ curr = &inodev_set.i_hash[index];
51283+ if (*curr != NULL)
51284+ (*curr)->prev = entry;
51285+
51286+ entry->next = *curr;
51287+ *curr = entry;
51288+
51289+ return;
51290+}
51291+
51292+static void
51293+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51294+{
51295+ unsigned int index =
51296+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51297+ struct acl_role_label **curr;
51298+ struct acl_role_label *tmp, *tmp2;
51299+
51300+ curr = &acl_role_set.r_hash[index];
51301+
51302+ /* simple case, slot is empty, just set it to our role */
51303+ if (*curr == NULL) {
51304+ *curr = role;
51305+ } else {
51306+ /* example:
51307+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
51308+ 2 -> 3
51309+ */
51310+ /* first check to see if we can already be reached via this slot */
51311+ tmp = *curr;
51312+ while (tmp && tmp != role)
51313+ tmp = tmp->next;
51314+ if (tmp == role) {
51315+ /* we don't need to add ourselves to this slot's chain */
51316+ return;
51317+ }
51318+ /* we need to add ourselves to this chain, two cases */
51319+ if (role->next == NULL) {
51320+ /* simple case, append the current chain to our role */
51321+ role->next = *curr;
51322+ *curr = role;
51323+ } else {
51324+ /* 1 -> 2 -> 3 -> 4
51325+ 2 -> 3 -> 4
51326+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51327+ */
51328+ /* trickier case: walk our role's chain until we find
51329+ the role for the start of the current slot's chain */
51330+ tmp = role;
51331+ tmp2 = *curr;
51332+ while (tmp->next && tmp->next != tmp2)
51333+ tmp = tmp->next;
51334+ if (tmp->next == tmp2) {
51335+ /* from example above, we found 3, so just
51336+ replace this slot's chain with ours */
51337+ *curr = role;
51338+ } else {
51339+ /* we didn't find a subset of our role's chain
51340+ in the current slot's chain, so append their
51341+ chain to ours, and set us as the first role in
51342+ the slot's chain
51343+
51344+ we could fold this case with the case above,
51345+ but making it explicit for clarity
51346+ */
51347+ tmp->next = tmp2;
51348+ *curr = role;
51349+ }
51350+ }
51351+ }
51352+
51353+ return;
51354+}
51355+
51356+static void
51357+insert_acl_role_label(struct acl_role_label *role)
51358+{
51359+ int i;
51360+
51361+ if (role_list == NULL) {
51362+ role_list = role;
51363+ role->prev = NULL;
51364+ } else {
51365+ role->prev = role_list;
51366+ role_list = role;
51367+ }
51368+
51369+ /* used for hash chains */
51370+ role->next = NULL;
51371+
51372+ if (role->roletype & GR_ROLE_DOMAIN) {
51373+ for (i = 0; i < role->domain_child_num; i++)
51374+ __insert_acl_role_label(role, role->domain_children[i]);
51375+ } else
51376+ __insert_acl_role_label(role, role->uidgid);
51377+}
51378+
51379+static int
51380+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51381+{
51382+ struct name_entry **curr, *nentry;
51383+ struct inodev_entry *ientry;
51384+ unsigned int len = strlen(name);
51385+ unsigned int key = full_name_hash(name, len);
51386+ unsigned int index = key % name_set.n_size;
51387+
51388+ curr = &name_set.n_hash[index];
51389+
51390+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51391+ curr = &((*curr)->next);
51392+
51393+ if (*curr != NULL)
51394+ return 1;
51395+
51396+ nentry = acl_alloc(sizeof (struct name_entry));
51397+ if (nentry == NULL)
51398+ return 0;
51399+ ientry = acl_alloc(sizeof (struct inodev_entry));
51400+ if (ientry == NULL)
51401+ return 0;
51402+ ientry->nentry = nentry;
51403+
51404+ nentry->key = key;
51405+ nentry->name = name;
51406+ nentry->inode = inode;
51407+ nentry->device = device;
51408+ nentry->len = len;
51409+ nentry->deleted = deleted;
51410+
51411+ nentry->prev = NULL;
51412+ curr = &name_set.n_hash[index];
51413+ if (*curr != NULL)
51414+ (*curr)->prev = nentry;
51415+ nentry->next = *curr;
51416+ *curr = nentry;
51417+
51418+ /* insert us into the table searchable by inode/dev */
51419+ insert_inodev_entry(ientry);
51420+
51421+ return 1;
51422+}
51423+
51424+static void
51425+insert_acl_obj_label(struct acl_object_label *obj,
51426+ struct acl_subject_label *subj)
51427+{
51428+ unsigned int index =
51429+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51430+ struct acl_object_label **curr;
51431+
51432+
51433+ obj->prev = NULL;
51434+
51435+ curr = &subj->obj_hash[index];
51436+ if (*curr != NULL)
51437+ (*curr)->prev = obj;
51438+
51439+ obj->next = *curr;
51440+ *curr = obj;
51441+
51442+ return;
51443+}
51444+
51445+static void
51446+insert_acl_subj_label(struct acl_subject_label *obj,
51447+ struct acl_role_label *role)
51448+{
51449+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51450+ struct acl_subject_label **curr;
51451+
51452+ obj->prev = NULL;
51453+
51454+ curr = &role->subj_hash[index];
51455+ if (*curr != NULL)
51456+ (*curr)->prev = obj;
51457+
51458+ obj->next = *curr;
51459+ *curr = obj;
51460+
51461+ return;
51462+}
51463+
51464+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51465+
51466+static void *
51467+create_table(__u32 * len, int elementsize)
51468+{
51469+ unsigned int table_sizes[] = {
51470+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51471+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51472+ 4194301, 8388593, 16777213, 33554393, 67108859
51473+ };
51474+ void *newtable = NULL;
51475+ unsigned int pwr = 0;
51476+
51477+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51478+ table_sizes[pwr] <= *len)
51479+ pwr++;
51480+
51481+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51482+ return newtable;
51483+
51484+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51485+ newtable =
51486+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51487+ else
51488+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51489+
51490+ *len = table_sizes[pwr];
51491+
51492+ return newtable;
51493+}
51494+
51495+static int
51496+init_variables(const struct gr_arg *arg)
51497+{
51498+ struct task_struct *reaper = init_pid_ns.child_reaper;
51499+ unsigned int stacksize;
51500+
51501+ subj_map_set.s_size = arg->role_db.num_subjects;
51502+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51503+ name_set.n_size = arg->role_db.num_objects;
51504+ inodev_set.i_size = arg->role_db.num_objects;
51505+
51506+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51507+ !name_set.n_size || !inodev_set.i_size)
51508+ return 1;
51509+
51510+ if (!gr_init_uidset())
51511+ return 1;
51512+
51513+ /* set up the stack that holds allocation info */
51514+
51515+ stacksize = arg->role_db.num_pointers + 5;
51516+
51517+ if (!acl_alloc_stack_init(stacksize))
51518+ return 1;
51519+
51520+ /* grab reference for the real root dentry and vfsmount */
51521+ get_fs_root(reaper->fs, &real_root);
51522+
51523+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51524+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51525+#endif
51526+
51527+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51528+ if (fakefs_obj_rw == NULL)
51529+ return 1;
51530+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51531+
51532+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51533+ if (fakefs_obj_rwx == NULL)
51534+ return 1;
51535+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51536+
51537+ subj_map_set.s_hash =
51538+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51539+ acl_role_set.r_hash =
51540+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51541+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51542+ inodev_set.i_hash =
51543+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51544+
51545+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51546+ !name_set.n_hash || !inodev_set.i_hash)
51547+ return 1;
51548+
51549+ memset(subj_map_set.s_hash, 0,
51550+ sizeof(struct subject_map *) * subj_map_set.s_size);
51551+ memset(acl_role_set.r_hash, 0,
51552+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51553+ memset(name_set.n_hash, 0,
51554+ sizeof (struct name_entry *) * name_set.n_size);
51555+ memset(inodev_set.i_hash, 0,
51556+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51557+
51558+ return 0;
51559+}
51560+
51561+/* free information not needed after startup
51562+ currently contains user->kernel pointer mappings for subjects
51563+*/
51564+
51565+static void
51566+free_init_variables(void)
51567+{
51568+ __u32 i;
51569+
51570+ if (subj_map_set.s_hash) {
51571+ for (i = 0; i < subj_map_set.s_size; i++) {
51572+ if (subj_map_set.s_hash[i]) {
51573+ kfree(subj_map_set.s_hash[i]);
51574+ subj_map_set.s_hash[i] = NULL;
51575+ }
51576+ }
51577+
51578+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51579+ PAGE_SIZE)
51580+ kfree(subj_map_set.s_hash);
51581+ else
51582+ vfree(subj_map_set.s_hash);
51583+ }
51584+
51585+ return;
51586+}
51587+
51588+static void
51589+free_variables(void)
51590+{
51591+ struct acl_subject_label *s;
51592+ struct acl_role_label *r;
51593+ struct task_struct *task, *task2;
51594+ unsigned int x;
51595+
51596+ gr_clear_learn_entries();
51597+
51598+ read_lock(&tasklist_lock);
51599+ do_each_thread(task2, task) {
51600+ task->acl_sp_role = 0;
51601+ task->acl_role_id = 0;
51602+ task->acl = NULL;
51603+ task->role = NULL;
51604+ } while_each_thread(task2, task);
51605+ read_unlock(&tasklist_lock);
51606+
51607+ /* release the reference to the real root dentry and vfsmount */
51608+ path_put(&real_root);
51609+ memset(&real_root, 0, sizeof(real_root));
51610+
51611+ /* free all object hash tables */
51612+
51613+ FOR_EACH_ROLE_START(r)
51614+ if (r->subj_hash == NULL)
51615+ goto next_role;
51616+ FOR_EACH_SUBJECT_START(r, s, x)
51617+ if (s->obj_hash == NULL)
51618+ break;
51619+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51620+ kfree(s->obj_hash);
51621+ else
51622+ vfree(s->obj_hash);
51623+ FOR_EACH_SUBJECT_END(s, x)
51624+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51625+ if (s->obj_hash == NULL)
51626+ break;
51627+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51628+ kfree(s->obj_hash);
51629+ else
51630+ vfree(s->obj_hash);
51631+ FOR_EACH_NESTED_SUBJECT_END(s)
51632+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51633+ kfree(r->subj_hash);
51634+ else
51635+ vfree(r->subj_hash);
51636+ r->subj_hash = NULL;
51637+next_role:
51638+ FOR_EACH_ROLE_END(r)
51639+
51640+ acl_free_all();
51641+
51642+ if (acl_role_set.r_hash) {
51643+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51644+ PAGE_SIZE)
51645+ kfree(acl_role_set.r_hash);
51646+ else
51647+ vfree(acl_role_set.r_hash);
51648+ }
51649+ if (name_set.n_hash) {
51650+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51651+ PAGE_SIZE)
51652+ kfree(name_set.n_hash);
51653+ else
51654+ vfree(name_set.n_hash);
51655+ }
51656+
51657+ if (inodev_set.i_hash) {
51658+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51659+ PAGE_SIZE)
51660+ kfree(inodev_set.i_hash);
51661+ else
51662+ vfree(inodev_set.i_hash);
51663+ }
51664+
51665+ gr_free_uidset();
51666+
51667+ memset(&name_set, 0, sizeof (struct name_db));
51668+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51669+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51670+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51671+
51672+ default_role = NULL;
51673+ kernel_role = NULL;
51674+ role_list = NULL;
51675+
51676+ return;
51677+}
51678+
51679+static __u32
51680+count_user_objs(struct acl_object_label *userp)
51681+{
51682+ struct acl_object_label o_tmp;
51683+ __u32 num = 0;
51684+
51685+ while (userp) {
51686+ if (copy_from_user(&o_tmp, userp,
51687+ sizeof (struct acl_object_label)))
51688+ break;
51689+
51690+ userp = o_tmp.prev;
51691+ num++;
51692+ }
51693+
51694+ return num;
51695+}
51696+
51697+static struct acl_subject_label *
51698+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51699+
51700+static int
51701+copy_user_glob(struct acl_object_label *obj)
51702+{
51703+ struct acl_object_label *g_tmp, **guser;
51704+ unsigned int len;
51705+ char *tmp;
51706+
51707+ if (obj->globbed == NULL)
51708+ return 0;
51709+
51710+ guser = &obj->globbed;
51711+ while (*guser) {
51712+ g_tmp = (struct acl_object_label *)
51713+ acl_alloc(sizeof (struct acl_object_label));
51714+ if (g_tmp == NULL)
51715+ return -ENOMEM;
51716+
51717+ if (copy_from_user(g_tmp, *guser,
51718+ sizeof (struct acl_object_label)))
51719+ return -EFAULT;
51720+
51721+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51722+
51723+ if (!len || len >= PATH_MAX)
51724+ return -EINVAL;
51725+
51726+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51727+ return -ENOMEM;
51728+
51729+ if (copy_from_user(tmp, g_tmp->filename, len))
51730+ return -EFAULT;
51731+ tmp[len-1] = '\0';
51732+ g_tmp->filename = tmp;
51733+
51734+ *guser = g_tmp;
51735+ guser = &(g_tmp->next);
51736+ }
51737+
51738+ return 0;
51739+}
51740+
51741+static int
51742+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51743+ struct acl_role_label *role)
51744+{
51745+ struct acl_object_label *o_tmp;
51746+ unsigned int len;
51747+ int ret;
51748+ char *tmp;
51749+
51750+ while (userp) {
51751+ if ((o_tmp = (struct acl_object_label *)
51752+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51753+ return -ENOMEM;
51754+
51755+ if (copy_from_user(o_tmp, userp,
51756+ sizeof (struct acl_object_label)))
51757+ return -EFAULT;
51758+
51759+ userp = o_tmp->prev;
51760+
51761+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51762+
51763+ if (!len || len >= PATH_MAX)
51764+ return -EINVAL;
51765+
51766+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51767+ return -ENOMEM;
51768+
51769+ if (copy_from_user(tmp, o_tmp->filename, len))
51770+ return -EFAULT;
51771+ tmp[len-1] = '\0';
51772+ o_tmp->filename = tmp;
51773+
51774+ insert_acl_obj_label(o_tmp, subj);
51775+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51776+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51777+ return -ENOMEM;
51778+
51779+ ret = copy_user_glob(o_tmp);
51780+ if (ret)
51781+ return ret;
51782+
51783+ if (o_tmp->nested) {
51784+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51785+ if (IS_ERR(o_tmp->nested))
51786+ return PTR_ERR(o_tmp->nested);
51787+
51788+ /* insert into nested subject list */
51789+ o_tmp->nested->next = role->hash->first;
51790+ role->hash->first = o_tmp->nested;
51791+ }
51792+ }
51793+
51794+ return 0;
51795+}
51796+
51797+static __u32
51798+count_user_subjs(struct acl_subject_label *userp)
51799+{
51800+ struct acl_subject_label s_tmp;
51801+ __u32 num = 0;
51802+
51803+ while (userp) {
51804+ if (copy_from_user(&s_tmp, userp,
51805+ sizeof (struct acl_subject_label)))
51806+ break;
51807+
51808+ userp = s_tmp.prev;
51809+ /* do not count nested subjects against this count, since
51810+ they are not included in the hash table, but are
51811+ attached to objects. We have already counted
51812+ the subjects in userspace for the allocation
51813+ stack
51814+ */
51815+ if (!(s_tmp.mode & GR_NESTED))
51816+ num++;
51817+ }
51818+
51819+ return num;
51820+}
51821+
51822+static int
51823+copy_user_allowedips(struct acl_role_label *rolep)
51824+{
51825+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51826+
51827+ ruserip = rolep->allowed_ips;
51828+
51829+ while (ruserip) {
51830+ rlast = rtmp;
51831+
51832+ if ((rtmp = (struct role_allowed_ip *)
51833+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51834+ return -ENOMEM;
51835+
51836+ if (copy_from_user(rtmp, ruserip,
51837+ sizeof (struct role_allowed_ip)))
51838+ return -EFAULT;
51839+
51840+ ruserip = rtmp->prev;
51841+
51842+ if (!rlast) {
51843+ rtmp->prev = NULL;
51844+ rolep->allowed_ips = rtmp;
51845+ } else {
51846+ rlast->next = rtmp;
51847+ rtmp->prev = rlast;
51848+ }
51849+
51850+ if (!ruserip)
51851+ rtmp->next = NULL;
51852+ }
51853+
51854+ return 0;
51855+}
51856+
51857+static int
51858+copy_user_transitions(struct acl_role_label *rolep)
51859+{
51860+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51861+
51862+ unsigned int len;
51863+ char *tmp;
51864+
51865+ rusertp = rolep->transitions;
51866+
51867+ while (rusertp) {
51868+ rlast = rtmp;
51869+
51870+ if ((rtmp = (struct role_transition *)
51871+ acl_alloc(sizeof (struct role_transition))) == NULL)
51872+ return -ENOMEM;
51873+
51874+ if (copy_from_user(rtmp, rusertp,
51875+ sizeof (struct role_transition)))
51876+ return -EFAULT;
51877+
51878+ rusertp = rtmp->prev;
51879+
51880+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51881+
51882+ if (!len || len >= GR_SPROLE_LEN)
51883+ return -EINVAL;
51884+
51885+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51886+ return -ENOMEM;
51887+
51888+ if (copy_from_user(tmp, rtmp->rolename, len))
51889+ return -EFAULT;
51890+ tmp[len-1] = '\0';
51891+ rtmp->rolename = tmp;
51892+
51893+ if (!rlast) {
51894+ rtmp->prev = NULL;
51895+ rolep->transitions = rtmp;
51896+ } else {
51897+ rlast->next = rtmp;
51898+ rtmp->prev = rlast;
51899+ }
51900+
51901+ if (!rusertp)
51902+ rtmp->next = NULL;
51903+ }
51904+
51905+ return 0;
51906+}
51907+
51908+static struct acl_subject_label *
51909+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51910+{
51911+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51912+ unsigned int len;
51913+ char *tmp;
51914+ __u32 num_objs;
51915+ struct acl_ip_label **i_tmp, *i_utmp2;
51916+ struct gr_hash_struct ghash;
51917+ struct subject_map *subjmap;
51918+ unsigned int i_num;
51919+ int err;
51920+
51921+ s_tmp = lookup_subject_map(userp);
51922+
51923+ /* we've already copied this subject into the kernel, just return
51924+ the reference to it, and don't copy it over again
51925+ */
51926+ if (s_tmp)
51927+ return(s_tmp);
51928+
51929+ if ((s_tmp = (struct acl_subject_label *)
51930+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51931+ return ERR_PTR(-ENOMEM);
51932+
51933+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51934+ if (subjmap == NULL)
51935+ return ERR_PTR(-ENOMEM);
51936+
51937+ subjmap->user = userp;
51938+ subjmap->kernel = s_tmp;
51939+ insert_subj_map_entry(subjmap);
51940+
51941+ if (copy_from_user(s_tmp, userp,
51942+ sizeof (struct acl_subject_label)))
51943+ return ERR_PTR(-EFAULT);
51944+
51945+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51946+
51947+ if (!len || len >= PATH_MAX)
51948+ return ERR_PTR(-EINVAL);
51949+
51950+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51951+ return ERR_PTR(-ENOMEM);
51952+
51953+ if (copy_from_user(tmp, s_tmp->filename, len))
51954+ return ERR_PTR(-EFAULT);
51955+ tmp[len-1] = '\0';
51956+ s_tmp->filename = tmp;
51957+
51958+ if (!strcmp(s_tmp->filename, "/"))
51959+ role->root_label = s_tmp;
51960+
51961+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51962+ return ERR_PTR(-EFAULT);
51963+
51964+ /* copy user and group transition tables */
51965+
51966+ if (s_tmp->user_trans_num) {
51967+ uid_t *uidlist;
51968+
51969+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51970+ if (uidlist == NULL)
51971+ return ERR_PTR(-ENOMEM);
51972+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51973+ return ERR_PTR(-EFAULT);
51974+
51975+ s_tmp->user_transitions = uidlist;
51976+ }
51977+
51978+ if (s_tmp->group_trans_num) {
51979+ gid_t *gidlist;
51980+
51981+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51982+ if (gidlist == NULL)
51983+ return ERR_PTR(-ENOMEM);
51984+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51985+ return ERR_PTR(-EFAULT);
51986+
51987+ s_tmp->group_transitions = gidlist;
51988+ }
51989+
51990+ /* set up object hash table */
51991+ num_objs = count_user_objs(ghash.first);
51992+
51993+ s_tmp->obj_hash_size = num_objs;
51994+ s_tmp->obj_hash =
51995+ (struct acl_object_label **)
51996+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51997+
51998+ if (!s_tmp->obj_hash)
51999+ return ERR_PTR(-ENOMEM);
52000+
52001+ memset(s_tmp->obj_hash, 0,
52002+ s_tmp->obj_hash_size *
52003+ sizeof (struct acl_object_label *));
52004+
52005+ /* add in objects */
52006+ err = copy_user_objs(ghash.first, s_tmp, role);
52007+
52008+ if (err)
52009+ return ERR_PTR(err);
52010+
52011+ /* set pointer for parent subject */
52012+ if (s_tmp->parent_subject) {
52013+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52014+
52015+ if (IS_ERR(s_tmp2))
52016+ return s_tmp2;
52017+
52018+ s_tmp->parent_subject = s_tmp2;
52019+ }
52020+
52021+ /* add in ip acls */
52022+
52023+ if (!s_tmp->ip_num) {
52024+ s_tmp->ips = NULL;
52025+ goto insert;
52026+ }
52027+
52028+ i_tmp =
52029+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52030+ sizeof (struct acl_ip_label *));
52031+
52032+ if (!i_tmp)
52033+ return ERR_PTR(-ENOMEM);
52034+
52035+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52036+ *(i_tmp + i_num) =
52037+ (struct acl_ip_label *)
52038+ acl_alloc(sizeof (struct acl_ip_label));
52039+ if (!*(i_tmp + i_num))
52040+ return ERR_PTR(-ENOMEM);
52041+
52042+ if (copy_from_user
52043+ (&i_utmp2, s_tmp->ips + i_num,
52044+ sizeof (struct acl_ip_label *)))
52045+ return ERR_PTR(-EFAULT);
52046+
52047+ if (copy_from_user
52048+ (*(i_tmp + i_num), i_utmp2,
52049+ sizeof (struct acl_ip_label)))
52050+ return ERR_PTR(-EFAULT);
52051+
52052+ if ((*(i_tmp + i_num))->iface == NULL)
52053+ continue;
52054+
52055+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52056+ if (!len || len >= IFNAMSIZ)
52057+ return ERR_PTR(-EINVAL);
52058+ tmp = acl_alloc(len);
52059+ if (tmp == NULL)
52060+ return ERR_PTR(-ENOMEM);
52061+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52062+ return ERR_PTR(-EFAULT);
52063+ (*(i_tmp + i_num))->iface = tmp;
52064+ }
52065+
52066+ s_tmp->ips = i_tmp;
52067+
52068+insert:
52069+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52070+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52071+ return ERR_PTR(-ENOMEM);
52072+
52073+ return s_tmp;
52074+}
52075+
52076+static int
52077+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52078+{
52079+ struct acl_subject_label s_pre;
52080+ struct acl_subject_label * ret;
52081+ int err;
52082+
52083+ while (userp) {
52084+ if (copy_from_user(&s_pre, userp,
52085+ sizeof (struct acl_subject_label)))
52086+ return -EFAULT;
52087+
52088+ /* do not add nested subjects here, add
52089+ while parsing objects
52090+ */
52091+
52092+ if (s_pre.mode & GR_NESTED) {
52093+ userp = s_pre.prev;
52094+ continue;
52095+ }
52096+
52097+ ret = do_copy_user_subj(userp, role);
52098+
52099+ err = PTR_ERR(ret);
52100+ if (IS_ERR(ret))
52101+ return err;
52102+
52103+ insert_acl_subj_label(ret, role);
52104+
52105+ userp = s_pre.prev;
52106+ }
52107+
52108+ return 0;
52109+}
52110+
52111+static int
52112+copy_user_acl(struct gr_arg *arg)
52113+{
52114+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52115+ struct sprole_pw *sptmp;
52116+ struct gr_hash_struct *ghash;
52117+ uid_t *domainlist;
52118+ unsigned int r_num;
52119+ unsigned int len;
52120+ char *tmp;
52121+ int err = 0;
52122+ __u16 i;
52123+ __u32 num_subjs;
52124+
52125+ /* we need a default and kernel role */
52126+ if (arg->role_db.num_roles < 2)
52127+ return -EINVAL;
52128+
52129+ /* copy special role authentication info from userspace */
52130+
52131+ num_sprole_pws = arg->num_sprole_pws;
52132+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52133+
52134+ if (!acl_special_roles && num_sprole_pws)
52135+ return -ENOMEM;
52136+
52137+ for (i = 0; i < num_sprole_pws; i++) {
52138+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52139+ if (!sptmp)
52140+ return -ENOMEM;
52141+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52142+ sizeof (struct sprole_pw)))
52143+ return -EFAULT;
52144+
52145+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52146+
52147+ if (!len || len >= GR_SPROLE_LEN)
52148+ return -EINVAL;
52149+
52150+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52151+ return -ENOMEM;
52152+
52153+ if (copy_from_user(tmp, sptmp->rolename, len))
52154+ return -EFAULT;
52155+
52156+ tmp[len-1] = '\0';
52157+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52158+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52159+#endif
52160+ sptmp->rolename = tmp;
52161+ acl_special_roles[i] = sptmp;
52162+ }
52163+
52164+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52165+
52166+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52167+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52168+
52169+ if (!r_tmp)
52170+ return -ENOMEM;
52171+
52172+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52173+ sizeof (struct acl_role_label *)))
52174+ return -EFAULT;
52175+
52176+ if (copy_from_user(r_tmp, r_utmp2,
52177+ sizeof (struct acl_role_label)))
52178+ return -EFAULT;
52179+
52180+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52181+
52182+ if (!len || len >= PATH_MAX)
52183+ return -EINVAL;
52184+
52185+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52186+ return -ENOMEM;
52187+
52188+ if (copy_from_user(tmp, r_tmp->rolename, len))
52189+ return -EFAULT;
52190+
52191+ tmp[len-1] = '\0';
52192+ r_tmp->rolename = tmp;
52193+
52194+ if (!strcmp(r_tmp->rolename, "default")
52195+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52196+ default_role = r_tmp;
52197+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52198+ kernel_role = r_tmp;
52199+ }
52200+
52201+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52202+ return -ENOMEM;
52203+
52204+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52205+ return -EFAULT;
52206+
52207+ r_tmp->hash = ghash;
52208+
52209+ num_subjs = count_user_subjs(r_tmp->hash->first);
52210+
52211+ r_tmp->subj_hash_size = num_subjs;
52212+ r_tmp->subj_hash =
52213+ (struct acl_subject_label **)
52214+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52215+
52216+ if (!r_tmp->subj_hash)
52217+ return -ENOMEM;
52218+
52219+ err = copy_user_allowedips(r_tmp);
52220+ if (err)
52221+ return err;
52222+
52223+ /* copy domain info */
52224+ if (r_tmp->domain_children != NULL) {
52225+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52226+ if (domainlist == NULL)
52227+ return -ENOMEM;
52228+
52229+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52230+ return -EFAULT;
52231+
52232+ r_tmp->domain_children = domainlist;
52233+ }
52234+
52235+ err = copy_user_transitions(r_tmp);
52236+ if (err)
52237+ return err;
52238+
52239+ memset(r_tmp->subj_hash, 0,
52240+ r_tmp->subj_hash_size *
52241+ sizeof (struct acl_subject_label *));
52242+
52243+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52244+
52245+ if (err)
52246+ return err;
52247+
52248+ /* set nested subject list to null */
52249+ r_tmp->hash->first = NULL;
52250+
52251+ insert_acl_role_label(r_tmp);
52252+ }
52253+
52254+ if (default_role == NULL || kernel_role == NULL)
52255+ return -EINVAL;
52256+
52257+ return err;
52258+}
52259+
52260+static int
52261+gracl_init(struct gr_arg *args)
52262+{
52263+ int error = 0;
52264+
52265+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52266+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52267+
52268+ if (init_variables(args)) {
52269+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52270+ error = -ENOMEM;
52271+ free_variables();
52272+ goto out;
52273+ }
52274+
52275+ error = copy_user_acl(args);
52276+ free_init_variables();
52277+ if (error) {
52278+ free_variables();
52279+ goto out;
52280+ }
52281+
52282+ if ((error = gr_set_acls(0))) {
52283+ free_variables();
52284+ goto out;
52285+ }
52286+
52287+ pax_open_kernel();
52288+ gr_status |= GR_READY;
52289+ pax_close_kernel();
52290+
52291+ out:
52292+ return error;
52293+}
52294+
52295+/* derived from glibc fnmatch() 0: match, 1: no match*/
52296+
52297+static int
52298+glob_match(const char *p, const char *n)
52299+{
52300+ char c;
52301+
52302+ while ((c = *p++) != '\0') {
52303+ switch (c) {
52304+ case '?':
52305+ if (*n == '\0')
52306+ return 1;
52307+ else if (*n == '/')
52308+ return 1;
52309+ break;
52310+ case '\\':
52311+ if (*n != c)
52312+ return 1;
52313+ break;
52314+ case '*':
52315+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
52316+ if (*n == '/')
52317+ return 1;
52318+ else if (c == '?') {
52319+ if (*n == '\0')
52320+ return 1;
52321+ else
52322+ ++n;
52323+ }
52324+ }
52325+ if (c == '\0') {
52326+ return 0;
52327+ } else {
52328+ const char *endp;
52329+
52330+ if ((endp = strchr(n, '/')) == NULL)
52331+ endp = n + strlen(n);
52332+
52333+ if (c == '[') {
52334+ for (--p; n < endp; ++n)
52335+ if (!glob_match(p, n))
52336+ return 0;
52337+ } else if (c == '/') {
52338+ while (*n != '\0' && *n != '/')
52339+ ++n;
52340+ if (*n == '/' && !glob_match(p, n + 1))
52341+ return 0;
52342+ } else {
52343+ for (--p; n < endp; ++n)
52344+ if (*n == c && !glob_match(p, n))
52345+ return 0;
52346+ }
52347+
52348+ return 1;
52349+ }
52350+ case '[':
52351+ {
52352+ int not;
52353+ char cold;
52354+
52355+ if (*n == '\0' || *n == '/')
52356+ return 1;
52357+
52358+ not = (*p == '!' || *p == '^');
52359+ if (not)
52360+ ++p;
52361+
52362+ c = *p++;
52363+ for (;;) {
52364+ unsigned char fn = (unsigned char)*n;
52365+
52366+ if (c == '\0')
52367+ return 1;
52368+ else {
52369+ if (c == fn)
52370+ goto matched;
52371+ cold = c;
52372+ c = *p++;
52373+
52374+ if (c == '-' && *p != ']') {
52375+ unsigned char cend = *p++;
52376+
52377+ if (cend == '\0')
52378+ return 1;
52379+
52380+ if (cold <= fn && fn <= cend)
52381+ goto matched;
52382+
52383+ c = *p++;
52384+ }
52385+ }
52386+
52387+ if (c == ']')
52388+ break;
52389+ }
52390+ if (!not)
52391+ return 1;
52392+ break;
52393+ matched:
52394+ while (c != ']') {
52395+ if (c == '\0')
52396+ return 1;
52397+
52398+ c = *p++;
52399+ }
52400+ if (not)
52401+ return 1;
52402+ }
52403+ break;
52404+ default:
52405+ if (c != *n)
52406+ return 1;
52407+ }
52408+
52409+ ++n;
52410+ }
52411+
52412+ if (*n == '\0')
52413+ return 0;
52414+
52415+ if (*n == '/')
52416+ return 0;
52417+
52418+ return 1;
52419+}
52420+
52421+static struct acl_object_label *
52422+chk_glob_label(struct acl_object_label *globbed,
52423+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52424+{
52425+ struct acl_object_label *tmp;
52426+
52427+ if (*path == NULL)
52428+ *path = gr_to_filename_nolock(dentry, mnt);
52429+
52430+ tmp = globbed;
52431+
52432+ while (tmp) {
52433+ if (!glob_match(tmp->filename, *path))
52434+ return tmp;
52435+ tmp = tmp->next;
52436+ }
52437+
52438+ return NULL;
52439+}
52440+
52441+static struct acl_object_label *
52442+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52443+ const ino_t curr_ino, const dev_t curr_dev,
52444+ const struct acl_subject_label *subj, char **path, const int checkglob)
52445+{
52446+ struct acl_subject_label *tmpsubj;
52447+ struct acl_object_label *retval;
52448+ struct acl_object_label *retval2;
52449+
52450+ tmpsubj = (struct acl_subject_label *) subj;
52451+ read_lock(&gr_inode_lock);
52452+ do {
52453+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52454+ if (retval) {
52455+ if (checkglob && retval->globbed) {
52456+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52457+ if (retval2)
52458+ retval = retval2;
52459+ }
52460+ break;
52461+ }
52462+ } while ((tmpsubj = tmpsubj->parent_subject));
52463+ read_unlock(&gr_inode_lock);
52464+
52465+ return retval;
52466+}
52467+
52468+static __inline__ struct acl_object_label *
52469+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52470+ struct dentry *curr_dentry,
52471+ const struct acl_subject_label *subj, char **path, const int checkglob)
52472+{
52473+ int newglob = checkglob;
52474+ ino_t inode;
52475+ dev_t device;
52476+
52477+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52478+ as we don't want a / * rule to match instead of the / object
52479+ don't do this for create lookups that call this function though, since they're looking up
52480+ on the parent and thus need globbing checks on all paths
52481+ */
52482+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52483+ newglob = GR_NO_GLOB;
52484+
52485+ spin_lock(&curr_dentry->d_lock);
52486+ inode = curr_dentry->d_inode->i_ino;
52487+ device = __get_dev(curr_dentry);
52488+ spin_unlock(&curr_dentry->d_lock);
52489+
52490+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52491+}
52492+
52493+static struct acl_object_label *
52494+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52495+ const struct acl_subject_label *subj, char *path, const int checkglob)
52496+{
52497+ struct dentry *dentry = (struct dentry *) l_dentry;
52498+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52499+ struct mount *real_mnt = real_mount(mnt);
52500+ struct acl_object_label *retval;
52501+ struct dentry *parent;
52502+
52503+ write_seqlock(&rename_lock);
52504+ br_read_lock(vfsmount_lock);
52505+
52506+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52507+#ifdef CONFIG_NET
52508+ mnt == sock_mnt ||
52509+#endif
52510+#ifdef CONFIG_HUGETLBFS
52511+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52512+#endif
52513+ /* ignore Eric Biederman */
52514+ IS_PRIVATE(l_dentry->d_inode))) {
52515+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52516+ goto out;
52517+ }
52518+
52519+ for (;;) {
52520+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52521+ break;
52522+
52523+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52524+ if (!mnt_has_parent(real_mnt))
52525+ break;
52526+
52527+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52528+ if (retval != NULL)
52529+ goto out;
52530+
52531+ dentry = real_mnt->mnt_mountpoint;
52532+ real_mnt = real_mnt->mnt_parent;
52533+ mnt = &real_mnt->mnt;
52534+ continue;
52535+ }
52536+
52537+ parent = dentry->d_parent;
52538+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52539+ if (retval != NULL)
52540+ goto out;
52541+
52542+ dentry = parent;
52543+ }
52544+
52545+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52546+
52547+ /* real_root is pinned so we don't have to hold a reference */
52548+ if (retval == NULL)
52549+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52550+out:
52551+ br_read_unlock(vfsmount_lock);
52552+ write_sequnlock(&rename_lock);
52553+
52554+ BUG_ON(retval == NULL);
52555+
52556+ return retval;
52557+}
52558+
52559+static __inline__ struct acl_object_label *
52560+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52561+ const struct acl_subject_label *subj)
52562+{
52563+ char *path = NULL;
52564+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52565+}
52566+
52567+static __inline__ struct acl_object_label *
52568+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52569+ const struct acl_subject_label *subj)
52570+{
52571+ char *path = NULL;
52572+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52573+}
52574+
52575+static __inline__ struct acl_object_label *
52576+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52577+ const struct acl_subject_label *subj, char *path)
52578+{
52579+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52580+}
52581+
52582+static struct acl_subject_label *
52583+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52584+ const struct acl_role_label *role)
52585+{
52586+ struct dentry *dentry = (struct dentry *) l_dentry;
52587+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52588+ struct mount *real_mnt = real_mount(mnt);
52589+ struct acl_subject_label *retval;
52590+ struct dentry *parent;
52591+
52592+ write_seqlock(&rename_lock);
52593+ br_read_lock(vfsmount_lock);
52594+
52595+ for (;;) {
52596+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52597+ break;
52598+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52599+ if (!mnt_has_parent(real_mnt))
52600+ break;
52601+
52602+ spin_lock(&dentry->d_lock);
52603+ read_lock(&gr_inode_lock);
52604+ retval =
52605+ lookup_acl_subj_label(dentry->d_inode->i_ino,
52606+ __get_dev(dentry), role);
52607+ read_unlock(&gr_inode_lock);
52608+ spin_unlock(&dentry->d_lock);
52609+ if (retval != NULL)
52610+ goto out;
52611+
52612+ dentry = real_mnt->mnt_mountpoint;
52613+ real_mnt = real_mnt->mnt_parent;
52614+ mnt = &real_mnt->mnt;
52615+ continue;
52616+ }
52617+
52618+ spin_lock(&dentry->d_lock);
52619+ read_lock(&gr_inode_lock);
52620+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52621+ __get_dev(dentry), role);
52622+ read_unlock(&gr_inode_lock);
52623+ parent = dentry->d_parent;
52624+ spin_unlock(&dentry->d_lock);
52625+
52626+ if (retval != NULL)
52627+ goto out;
52628+
52629+ dentry = parent;
52630+ }
52631+
52632+ spin_lock(&dentry->d_lock);
52633+ read_lock(&gr_inode_lock);
52634+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52635+ __get_dev(dentry), role);
52636+ read_unlock(&gr_inode_lock);
52637+ spin_unlock(&dentry->d_lock);
52638+
52639+ if (unlikely(retval == NULL)) {
52640+ /* real_root is pinned, we don't need to hold a reference */
52641+ read_lock(&gr_inode_lock);
52642+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52643+ __get_dev(real_root.dentry), role);
52644+ read_unlock(&gr_inode_lock);
52645+ }
52646+out:
52647+ br_read_unlock(vfsmount_lock);
52648+ write_sequnlock(&rename_lock);
52649+
52650+ BUG_ON(retval == NULL);
52651+
52652+ return retval;
52653+}
52654+
52655+static void
52656+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52657+{
52658+ struct task_struct *task = current;
52659+ const struct cred *cred = current_cred();
52660+
52661+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52662+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52663+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52664+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52665+
52666+ return;
52667+}
52668+
52669+static void
52670+gr_log_learn_id_change(const char type, const unsigned int real,
52671+ const unsigned int effective, const unsigned int fs)
52672+{
52673+ struct task_struct *task = current;
52674+ const struct cred *cred = current_cred();
52675+
52676+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52677+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52678+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52679+ type, real, effective, fs, &task->signal->saved_ip);
52680+
52681+ return;
52682+}
52683+
52684+__u32
52685+gr_search_file(const struct dentry * dentry, const __u32 mode,
52686+ const struct vfsmount * mnt)
52687+{
52688+ __u32 retval = mode;
52689+ struct acl_subject_label *curracl;
52690+ struct acl_object_label *currobj;
52691+
52692+ if (unlikely(!(gr_status & GR_READY)))
52693+ return (mode & ~GR_AUDITS);
52694+
52695+ curracl = current->acl;
52696+
52697+ currobj = chk_obj_label(dentry, mnt, curracl);
52698+ retval = currobj->mode & mode;
52699+
52700+ /* if we're opening a specified transfer file for writing
52701+ (e.g. /dev/initctl), then transfer our role to init
52702+ */
52703+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52704+ current->role->roletype & GR_ROLE_PERSIST)) {
52705+ struct task_struct *task = init_pid_ns.child_reaper;
52706+
52707+ if (task->role != current->role) {
52708+ task->acl_sp_role = 0;
52709+ task->acl_role_id = current->acl_role_id;
52710+ task->role = current->role;
52711+ rcu_read_lock();
52712+ read_lock(&grsec_exec_file_lock);
52713+ gr_apply_subject_to_task(task);
52714+ read_unlock(&grsec_exec_file_lock);
52715+ rcu_read_unlock();
52716+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52717+ }
52718+ }
52719+
52720+ if (unlikely
52721+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52722+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52723+ __u32 new_mode = mode;
52724+
52725+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52726+
52727+ retval = new_mode;
52728+
52729+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52730+ new_mode |= GR_INHERIT;
52731+
52732+ if (!(mode & GR_NOLEARN))
52733+ gr_log_learn(dentry, mnt, new_mode);
52734+ }
52735+
52736+ return retval;
52737+}
52738+
52739+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52740+ const struct dentry *parent,
52741+ const struct vfsmount *mnt)
52742+{
52743+ struct name_entry *match;
52744+ struct acl_object_label *matchpo;
52745+ struct acl_subject_label *curracl;
52746+ char *path;
52747+
52748+ if (unlikely(!(gr_status & GR_READY)))
52749+ return NULL;
52750+
52751+ preempt_disable();
52752+ path = gr_to_filename_rbac(new_dentry, mnt);
52753+ match = lookup_name_entry_create(path);
52754+
52755+ curracl = current->acl;
52756+
52757+ if (match) {
52758+ read_lock(&gr_inode_lock);
52759+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52760+ read_unlock(&gr_inode_lock);
52761+
52762+ if (matchpo) {
52763+ preempt_enable();
52764+ return matchpo;
52765+ }
52766+ }
52767+
52768+ // lookup parent
52769+
52770+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52771+
52772+ preempt_enable();
52773+ return matchpo;
52774+}
52775+
52776+__u32
52777+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52778+ const struct vfsmount * mnt, const __u32 mode)
52779+{
52780+ struct acl_object_label *matchpo;
52781+ __u32 retval;
52782+
52783+ if (unlikely(!(gr_status & GR_READY)))
52784+ return (mode & ~GR_AUDITS);
52785+
52786+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52787+
52788+ retval = matchpo->mode & mode;
52789+
52790+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52791+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52792+ __u32 new_mode = mode;
52793+
52794+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52795+
52796+ gr_log_learn(new_dentry, mnt, new_mode);
52797+ return new_mode;
52798+ }
52799+
52800+ return retval;
52801+}
52802+
52803+__u32
52804+gr_check_link(const struct dentry * new_dentry,
52805+ const struct dentry * parent_dentry,
52806+ const struct vfsmount * parent_mnt,
52807+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52808+{
52809+ struct acl_object_label *obj;
52810+ __u32 oldmode, newmode;
52811+ __u32 needmode;
52812+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52813+ GR_DELETE | GR_INHERIT;
52814+
52815+ if (unlikely(!(gr_status & GR_READY)))
52816+ return (GR_CREATE | GR_LINK);
52817+
52818+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52819+ oldmode = obj->mode;
52820+
52821+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52822+ newmode = obj->mode;
52823+
52824+ needmode = newmode & checkmodes;
52825+
52826+ // old name for hardlink must have at least the permissions of the new name
52827+ if ((oldmode & needmode) != needmode)
52828+ goto bad;
52829+
52830+ // if old name had restrictions/auditing, make sure the new name does as well
52831+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52832+
52833+ // don't allow hardlinking of suid/sgid files without permission
52834+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52835+ needmode |= GR_SETID;
52836+
52837+ if ((newmode & needmode) != needmode)
52838+ goto bad;
52839+
52840+ // enforce minimum permissions
52841+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52842+ return newmode;
52843+bad:
52844+ needmode = oldmode;
52845+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52846+ needmode |= GR_SETID;
52847+
52848+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52849+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52850+ return (GR_CREATE | GR_LINK);
52851+ } else if (newmode & GR_SUPPRESS)
52852+ return GR_SUPPRESS;
52853+ else
52854+ return 0;
52855+}
52856+
52857+int
52858+gr_check_hidden_task(const struct task_struct *task)
52859+{
52860+ if (unlikely(!(gr_status & GR_READY)))
52861+ return 0;
52862+
52863+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52864+ return 1;
52865+
52866+ return 0;
52867+}
52868+
52869+int
52870+gr_check_protected_task(const struct task_struct *task)
52871+{
52872+ if (unlikely(!(gr_status & GR_READY) || !task))
52873+ return 0;
52874+
52875+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52876+ task->acl != current->acl)
52877+ return 1;
52878+
52879+ return 0;
52880+}
52881+
52882+int
52883+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52884+{
52885+ struct task_struct *p;
52886+ int ret = 0;
52887+
52888+ if (unlikely(!(gr_status & GR_READY) || !pid))
52889+ return ret;
52890+
52891+ read_lock(&tasklist_lock);
52892+ do_each_pid_task(pid, type, p) {
52893+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52894+ p->acl != current->acl) {
52895+ ret = 1;
52896+ goto out;
52897+ }
52898+ } while_each_pid_task(pid, type, p);
52899+out:
52900+ read_unlock(&tasklist_lock);
52901+
52902+ return ret;
52903+}
52904+
52905+void
52906+gr_copy_label(struct task_struct *tsk)
52907+{
52908+ tsk->signal->used_accept = 0;
52909+ tsk->acl_sp_role = 0;
52910+ tsk->acl_role_id = current->acl_role_id;
52911+ tsk->acl = current->acl;
52912+ tsk->role = current->role;
52913+ tsk->signal->curr_ip = current->signal->curr_ip;
52914+ tsk->signal->saved_ip = current->signal->saved_ip;
52915+ if (current->exec_file)
52916+ get_file(current->exec_file);
52917+ tsk->exec_file = current->exec_file;
52918+ tsk->is_writable = current->is_writable;
52919+ if (unlikely(current->signal->used_accept)) {
52920+ current->signal->curr_ip = 0;
52921+ current->signal->saved_ip = 0;
52922+ }
52923+
52924+ return;
52925+}
52926+
52927+static void
52928+gr_set_proc_res(struct task_struct *task)
52929+{
52930+ struct acl_subject_label *proc;
52931+ unsigned short i;
52932+
52933+ proc = task->acl;
52934+
52935+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52936+ return;
52937+
52938+ for (i = 0; i < RLIM_NLIMITS; i++) {
52939+ if (!(proc->resmask & (1 << i)))
52940+ continue;
52941+
52942+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52943+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52944+ }
52945+
52946+ return;
52947+}
52948+
52949+extern int __gr_process_user_ban(struct user_struct *user);
52950+
52951+int
52952+gr_check_user_change(int real, int effective, int fs)
52953+{
52954+ unsigned int i;
52955+ __u16 num;
52956+ uid_t *uidlist;
52957+ int curuid;
52958+ int realok = 0;
52959+ int effectiveok = 0;
52960+ int fsok = 0;
52961+
52962+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52963+ struct user_struct *user;
52964+
52965+ if (real == -1)
52966+ goto skipit;
52967+
52968+ user = find_user(real);
52969+ if (user == NULL)
52970+ goto skipit;
52971+
52972+ if (__gr_process_user_ban(user)) {
52973+ /* for find_user */
52974+ free_uid(user);
52975+ return 1;
52976+ }
52977+
52978+ /* for find_user */
52979+ free_uid(user);
52980+
52981+skipit:
52982+#endif
52983+
52984+ if (unlikely(!(gr_status & GR_READY)))
52985+ return 0;
52986+
52987+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52988+ gr_log_learn_id_change('u', real, effective, fs);
52989+
52990+ num = current->acl->user_trans_num;
52991+ uidlist = current->acl->user_transitions;
52992+
52993+ if (uidlist == NULL)
52994+ return 0;
52995+
52996+ if (real == -1)
52997+ realok = 1;
52998+ if (effective == -1)
52999+ effectiveok = 1;
53000+ if (fs == -1)
53001+ fsok = 1;
53002+
53003+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53004+ for (i = 0; i < num; i++) {
53005+ curuid = (int)uidlist[i];
53006+ if (real == curuid)
53007+ realok = 1;
53008+ if (effective == curuid)
53009+ effectiveok = 1;
53010+ if (fs == curuid)
53011+ fsok = 1;
53012+ }
53013+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53014+ for (i = 0; i < num; i++) {
53015+ curuid = (int)uidlist[i];
53016+ if (real == curuid)
53017+ break;
53018+ if (effective == curuid)
53019+ break;
53020+ if (fs == curuid)
53021+ break;
53022+ }
53023+ /* not in deny list */
53024+ if (i == num) {
53025+ realok = 1;
53026+ effectiveok = 1;
53027+ fsok = 1;
53028+ }
53029+ }
53030+
53031+ if (realok && effectiveok && fsok)
53032+ return 0;
53033+ else {
53034+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53035+ return 1;
53036+ }
53037+}
53038+
53039+int
53040+gr_check_group_change(int real, int effective, int fs)
53041+{
53042+ unsigned int i;
53043+ __u16 num;
53044+ gid_t *gidlist;
53045+ int curgid;
53046+ int realok = 0;
53047+ int effectiveok = 0;
53048+ int fsok = 0;
53049+
53050+ if (unlikely(!(gr_status & GR_READY)))
53051+ return 0;
53052+
53053+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53054+ gr_log_learn_id_change('g', real, effective, fs);
53055+
53056+ num = current->acl->group_trans_num;
53057+ gidlist = current->acl->group_transitions;
53058+
53059+ if (gidlist == NULL)
53060+ return 0;
53061+
53062+ if (real == -1)
53063+ realok = 1;
53064+ if (effective == -1)
53065+ effectiveok = 1;
53066+ if (fs == -1)
53067+ fsok = 1;
53068+
53069+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53070+ for (i = 0; i < num; i++) {
53071+ curgid = (int)gidlist[i];
53072+ if (real == curgid)
53073+ realok = 1;
53074+ if (effective == curgid)
53075+ effectiveok = 1;
53076+ if (fs == curgid)
53077+ fsok = 1;
53078+ }
53079+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53080+ for (i = 0; i < num; i++) {
53081+ curgid = (int)gidlist[i];
53082+ if (real == curgid)
53083+ break;
53084+ if (effective == curgid)
53085+ break;
53086+ if (fs == curgid)
53087+ break;
53088+ }
53089+ /* not in deny list */
53090+ if (i == num) {
53091+ realok = 1;
53092+ effectiveok = 1;
53093+ fsok = 1;
53094+ }
53095+ }
53096+
53097+ if (realok && effectiveok && fsok)
53098+ return 0;
53099+ else {
53100+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53101+ return 1;
53102+ }
53103+}
53104+
53105+extern int gr_acl_is_capable(const int cap);
53106+
53107+void
53108+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53109+{
53110+ struct acl_role_label *role = task->role;
53111+ struct acl_subject_label *subj = NULL;
53112+ struct acl_object_label *obj;
53113+ struct file *filp;
53114+
53115+ if (unlikely(!(gr_status & GR_READY)))
53116+ return;
53117+
53118+ filp = task->exec_file;
53119+
53120+ /* kernel process, we'll give them the kernel role */
53121+ if (unlikely(!filp)) {
53122+ task->role = kernel_role;
53123+ task->acl = kernel_role->root_label;
53124+ return;
53125+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53126+ role = lookup_acl_role_label(task, uid, gid);
53127+
53128+ /* don't change the role if we're not a privileged process */
53129+ if (role && task->role != role &&
53130+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53131+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53132+ return;
53133+
53134+ /* perform subject lookup in possibly new role
53135+ we can use this result below in the case where role == task->role
53136+ */
53137+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53138+
53139+ /* if we changed uid/gid, but result in the same role
53140+ and are using inheritance, don't lose the inherited subject
53141+ if current subject is other than what normal lookup
53142+ would result in, we arrived via inheritance, don't
53143+ lose subject
53144+ */
53145+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53146+ (subj == task->acl)))
53147+ task->acl = subj;
53148+
53149+ task->role = role;
53150+
53151+ task->is_writable = 0;
53152+
53153+ /* ignore additional mmap checks for processes that are writable
53154+ by the default ACL */
53155+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53156+ if (unlikely(obj->mode & GR_WRITE))
53157+ task->is_writable = 1;
53158+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53159+ if (unlikely(obj->mode & GR_WRITE))
53160+ task->is_writable = 1;
53161+
53162+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53163+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53164+#endif
53165+
53166+ gr_set_proc_res(task);
53167+
53168+ return;
53169+}
53170+
53171+int
53172+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53173+ const int unsafe_flags)
53174+{
53175+ struct task_struct *task = current;
53176+ struct acl_subject_label *newacl;
53177+ struct acl_object_label *obj;
53178+ __u32 retmode;
53179+
53180+ if (unlikely(!(gr_status & GR_READY)))
53181+ return 0;
53182+
53183+ newacl = chk_subj_label(dentry, mnt, task->role);
53184+
53185+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53186+ did an exec
53187+ */
53188+ rcu_read_lock();
53189+ read_lock(&tasklist_lock);
53190+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53191+ (task->parent->acl->mode & GR_POVERRIDE))) {
53192+ read_unlock(&tasklist_lock);
53193+ rcu_read_unlock();
53194+ goto skip_check;
53195+ }
53196+ read_unlock(&tasklist_lock);
53197+ rcu_read_unlock();
53198+
53199+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53200+ !(task->role->roletype & GR_ROLE_GOD) &&
53201+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53202+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53203+ if (unsafe_flags & LSM_UNSAFE_SHARE)
53204+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53205+ else
53206+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53207+ return -EACCES;
53208+ }
53209+
53210+skip_check:
53211+
53212+ obj = chk_obj_label(dentry, mnt, task->acl);
53213+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53214+
53215+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53216+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53217+ if (obj->nested)
53218+ task->acl = obj->nested;
53219+ else
53220+ task->acl = newacl;
53221+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53222+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53223+
53224+ task->is_writable = 0;
53225+
53226+ /* ignore additional mmap checks for processes that are writable
53227+ by the default ACL */
53228+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53229+ if (unlikely(obj->mode & GR_WRITE))
53230+ task->is_writable = 1;
53231+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53232+ if (unlikely(obj->mode & GR_WRITE))
53233+ task->is_writable = 1;
53234+
53235+ gr_set_proc_res(task);
53236+
53237+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53238+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53239+#endif
53240+ return 0;
53241+}
53242+
53243+/* always called with valid inodev ptr */
53244+static void
53245+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53246+{
53247+ struct acl_object_label *matchpo;
53248+ struct acl_subject_label *matchps;
53249+ struct acl_subject_label *subj;
53250+ struct acl_role_label *role;
53251+ unsigned int x;
53252+
53253+ FOR_EACH_ROLE_START(role)
53254+ FOR_EACH_SUBJECT_START(role, subj, x)
53255+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53256+ matchpo->mode |= GR_DELETED;
53257+ FOR_EACH_SUBJECT_END(subj,x)
53258+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53259+ if (subj->inode == ino && subj->device == dev)
53260+ subj->mode |= GR_DELETED;
53261+ FOR_EACH_NESTED_SUBJECT_END(subj)
53262+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53263+ matchps->mode |= GR_DELETED;
53264+ FOR_EACH_ROLE_END(role)
53265+
53266+ inodev->nentry->deleted = 1;
53267+
53268+ return;
53269+}
53270+
53271+void
53272+gr_handle_delete(const ino_t ino, const dev_t dev)
53273+{
53274+ struct inodev_entry *inodev;
53275+
53276+ if (unlikely(!(gr_status & GR_READY)))
53277+ return;
53278+
53279+ write_lock(&gr_inode_lock);
53280+ inodev = lookup_inodev_entry(ino, dev);
53281+ if (inodev != NULL)
53282+ do_handle_delete(inodev, ino, dev);
53283+ write_unlock(&gr_inode_lock);
53284+
53285+ return;
53286+}
53287+
53288+static void
53289+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53290+ const ino_t newinode, const dev_t newdevice,
53291+ struct acl_subject_label *subj)
53292+{
53293+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53294+ struct acl_object_label *match;
53295+
53296+ match = subj->obj_hash[index];
53297+
53298+ while (match && (match->inode != oldinode ||
53299+ match->device != olddevice ||
53300+ !(match->mode & GR_DELETED)))
53301+ match = match->next;
53302+
53303+ if (match && (match->inode == oldinode)
53304+ && (match->device == olddevice)
53305+ && (match->mode & GR_DELETED)) {
53306+ if (match->prev == NULL) {
53307+ subj->obj_hash[index] = match->next;
53308+ if (match->next != NULL)
53309+ match->next->prev = NULL;
53310+ } else {
53311+ match->prev->next = match->next;
53312+ if (match->next != NULL)
53313+ match->next->prev = match->prev;
53314+ }
53315+ match->prev = NULL;
53316+ match->next = NULL;
53317+ match->inode = newinode;
53318+ match->device = newdevice;
53319+ match->mode &= ~GR_DELETED;
53320+
53321+ insert_acl_obj_label(match, subj);
53322+ }
53323+
53324+ return;
53325+}
53326+
53327+static void
53328+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53329+ const ino_t newinode, const dev_t newdevice,
53330+ struct acl_role_label *role)
53331+{
53332+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53333+ struct acl_subject_label *match;
53334+
53335+ match = role->subj_hash[index];
53336+
53337+ while (match && (match->inode != oldinode ||
53338+ match->device != olddevice ||
53339+ !(match->mode & GR_DELETED)))
53340+ match = match->next;
53341+
53342+ if (match && (match->inode == oldinode)
53343+ && (match->device == olddevice)
53344+ && (match->mode & GR_DELETED)) {
53345+ if (match->prev == NULL) {
53346+ role->subj_hash[index] = match->next;
53347+ if (match->next != NULL)
53348+ match->next->prev = NULL;
53349+ } else {
53350+ match->prev->next = match->next;
53351+ if (match->next != NULL)
53352+ match->next->prev = match->prev;
53353+ }
53354+ match->prev = NULL;
53355+ match->next = NULL;
53356+ match->inode = newinode;
53357+ match->device = newdevice;
53358+ match->mode &= ~GR_DELETED;
53359+
53360+ insert_acl_subj_label(match, role);
53361+ }
53362+
53363+ return;
53364+}
53365+
53366+static void
53367+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53368+ const ino_t newinode, const dev_t newdevice)
53369+{
53370+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53371+ struct inodev_entry *match;
53372+
53373+ match = inodev_set.i_hash[index];
53374+
53375+ while (match && (match->nentry->inode != oldinode ||
53376+ match->nentry->device != olddevice || !match->nentry->deleted))
53377+ match = match->next;
53378+
53379+ if (match && (match->nentry->inode == oldinode)
53380+ && (match->nentry->device == olddevice) &&
53381+ match->nentry->deleted) {
53382+ if (match->prev == NULL) {
53383+ inodev_set.i_hash[index] = match->next;
53384+ if (match->next != NULL)
53385+ match->next->prev = NULL;
53386+ } else {
53387+ match->prev->next = match->next;
53388+ if (match->next != NULL)
53389+ match->next->prev = match->prev;
53390+ }
53391+ match->prev = NULL;
53392+ match->next = NULL;
53393+ match->nentry->inode = newinode;
53394+ match->nentry->device = newdevice;
53395+ match->nentry->deleted = 0;
53396+
53397+ insert_inodev_entry(match);
53398+ }
53399+
53400+ return;
53401+}
53402+
53403+static void
53404+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53405+{
53406+ struct acl_subject_label *subj;
53407+ struct acl_role_label *role;
53408+ unsigned int x;
53409+
53410+ FOR_EACH_ROLE_START(role)
53411+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53412+
53413+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53414+ if ((subj->inode == ino) && (subj->device == dev)) {
53415+ subj->inode = ino;
53416+ subj->device = dev;
53417+ }
53418+ FOR_EACH_NESTED_SUBJECT_END(subj)
53419+ FOR_EACH_SUBJECT_START(role, subj, x)
53420+ update_acl_obj_label(matchn->inode, matchn->device,
53421+ ino, dev, subj);
53422+ FOR_EACH_SUBJECT_END(subj,x)
53423+ FOR_EACH_ROLE_END(role)
53424+
53425+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53426+
53427+ return;
53428+}
53429+
53430+static void
53431+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53432+ const struct vfsmount *mnt)
53433+{
53434+ ino_t ino = dentry->d_inode->i_ino;
53435+ dev_t dev = __get_dev(dentry);
53436+
53437+ __do_handle_create(matchn, ino, dev);
53438+
53439+ return;
53440+}
53441+
53442+void
53443+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53444+{
53445+ struct name_entry *matchn;
53446+
53447+ if (unlikely(!(gr_status & GR_READY)))
53448+ return;
53449+
53450+ preempt_disable();
53451+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53452+
53453+ if (unlikely((unsigned long)matchn)) {
53454+ write_lock(&gr_inode_lock);
53455+ do_handle_create(matchn, dentry, mnt);
53456+ write_unlock(&gr_inode_lock);
53457+ }
53458+ preempt_enable();
53459+
53460+ return;
53461+}
53462+
53463+void
53464+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53465+{
53466+ struct name_entry *matchn;
53467+
53468+ if (unlikely(!(gr_status & GR_READY)))
53469+ return;
53470+
53471+ preempt_disable();
53472+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53473+
53474+ if (unlikely((unsigned long)matchn)) {
53475+ write_lock(&gr_inode_lock);
53476+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53477+ write_unlock(&gr_inode_lock);
53478+ }
53479+ preempt_enable();
53480+
53481+ return;
53482+}
53483+
53484+void
53485+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53486+ struct dentry *old_dentry,
53487+ struct dentry *new_dentry,
53488+ struct vfsmount *mnt, const __u8 replace)
53489+{
53490+ struct name_entry *matchn;
53491+ struct inodev_entry *inodev;
53492+ struct inode *inode = new_dentry->d_inode;
53493+ ino_t old_ino = old_dentry->d_inode->i_ino;
53494+ dev_t old_dev = __get_dev(old_dentry);
53495+
53496+ /* vfs_rename swaps the name and parent link for old_dentry and
53497+ new_dentry
53498+ at this point, old_dentry has the new name, parent link, and inode
53499+ for the renamed file
53500+ if a file is being replaced by a rename, new_dentry has the inode
53501+ and name for the replaced file
53502+ */
53503+
53504+ if (unlikely(!(gr_status & GR_READY)))
53505+ return;
53506+
53507+ preempt_disable();
53508+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53509+
53510+ /* we wouldn't have to check d_inode if it weren't for
53511+ NFS silly-renaming
53512+ */
53513+
53514+ write_lock(&gr_inode_lock);
53515+ if (unlikely(replace && inode)) {
53516+ ino_t new_ino = inode->i_ino;
53517+ dev_t new_dev = __get_dev(new_dentry);
53518+
53519+ inodev = lookup_inodev_entry(new_ino, new_dev);
53520+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53521+ do_handle_delete(inodev, new_ino, new_dev);
53522+ }
53523+
53524+ inodev = lookup_inodev_entry(old_ino, old_dev);
53525+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53526+ do_handle_delete(inodev, old_ino, old_dev);
53527+
53528+ if (unlikely((unsigned long)matchn))
53529+ do_handle_create(matchn, old_dentry, mnt);
53530+
53531+ write_unlock(&gr_inode_lock);
53532+ preempt_enable();
53533+
53534+ return;
53535+}
53536+
53537+static int
53538+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53539+ unsigned char **sum)
53540+{
53541+ struct acl_role_label *r;
53542+ struct role_allowed_ip *ipp;
53543+ struct role_transition *trans;
53544+ unsigned int i;
53545+ int found = 0;
53546+ u32 curr_ip = current->signal->curr_ip;
53547+
53548+ current->signal->saved_ip = curr_ip;
53549+
53550+ /* check transition table */
53551+
53552+ for (trans = current->role->transitions; trans; trans = trans->next) {
53553+ if (!strcmp(rolename, trans->rolename)) {
53554+ found = 1;
53555+ break;
53556+ }
53557+ }
53558+
53559+ if (!found)
53560+ return 0;
53561+
53562+ /* handle special roles that do not require authentication
53563+ and check ip */
53564+
53565+ FOR_EACH_ROLE_START(r)
53566+ if (!strcmp(rolename, r->rolename) &&
53567+ (r->roletype & GR_ROLE_SPECIAL)) {
53568+ found = 0;
53569+ if (r->allowed_ips != NULL) {
53570+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53571+ if ((ntohl(curr_ip) & ipp->netmask) ==
53572+ (ntohl(ipp->addr) & ipp->netmask))
53573+ found = 1;
53574+ }
53575+ } else
53576+ found = 2;
53577+ if (!found)
53578+ return 0;
53579+
53580+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53581+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53582+ *salt = NULL;
53583+ *sum = NULL;
53584+ return 1;
53585+ }
53586+ }
53587+ FOR_EACH_ROLE_END(r)
53588+
53589+ for (i = 0; i < num_sprole_pws; i++) {
53590+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53591+ *salt = acl_special_roles[i]->salt;
53592+ *sum = acl_special_roles[i]->sum;
53593+ return 1;
53594+ }
53595+ }
53596+
53597+ return 0;
53598+}
53599+
53600+static void
53601+assign_special_role(char *rolename)
53602+{
53603+ struct acl_object_label *obj;
53604+ struct acl_role_label *r;
53605+ struct acl_role_label *assigned = NULL;
53606+ struct task_struct *tsk;
53607+ struct file *filp;
53608+
53609+ FOR_EACH_ROLE_START(r)
53610+ if (!strcmp(rolename, r->rolename) &&
53611+ (r->roletype & GR_ROLE_SPECIAL)) {
53612+ assigned = r;
53613+ break;
53614+ }
53615+ FOR_EACH_ROLE_END(r)
53616+
53617+ if (!assigned)
53618+ return;
53619+
53620+ read_lock(&tasklist_lock);
53621+ read_lock(&grsec_exec_file_lock);
53622+
53623+ tsk = current->real_parent;
53624+ if (tsk == NULL)
53625+ goto out_unlock;
53626+
53627+ filp = tsk->exec_file;
53628+ if (filp == NULL)
53629+ goto out_unlock;
53630+
53631+ tsk->is_writable = 0;
53632+
53633+ tsk->acl_sp_role = 1;
53634+ tsk->acl_role_id = ++acl_sp_role_value;
53635+ tsk->role = assigned;
53636+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53637+
53638+ /* ignore additional mmap checks for processes that are writable
53639+ by the default ACL */
53640+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53641+ if (unlikely(obj->mode & GR_WRITE))
53642+ tsk->is_writable = 1;
53643+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53644+ if (unlikely(obj->mode & GR_WRITE))
53645+ tsk->is_writable = 1;
53646+
53647+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53648+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53649+#endif
53650+
53651+out_unlock:
53652+ read_unlock(&grsec_exec_file_lock);
53653+ read_unlock(&tasklist_lock);
53654+ return;
53655+}
53656+
53657+int gr_check_secure_terminal(struct task_struct *task)
53658+{
53659+ struct task_struct *p, *p2, *p3;
53660+ struct files_struct *files;
53661+ struct fdtable *fdt;
53662+ struct file *our_file = NULL, *file;
53663+ int i;
53664+
53665+ if (task->signal->tty == NULL)
53666+ return 1;
53667+
53668+ files = get_files_struct(task);
53669+ if (files != NULL) {
53670+ rcu_read_lock();
53671+ fdt = files_fdtable(files);
53672+ for (i=0; i < fdt->max_fds; i++) {
53673+ file = fcheck_files(files, i);
53674+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53675+ get_file(file);
53676+ our_file = file;
53677+ }
53678+ }
53679+ rcu_read_unlock();
53680+ put_files_struct(files);
53681+ }
53682+
53683+ if (our_file == NULL)
53684+ return 1;
53685+
53686+ read_lock(&tasklist_lock);
53687+ do_each_thread(p2, p) {
53688+ files = get_files_struct(p);
53689+ if (files == NULL ||
53690+ (p->signal && p->signal->tty == task->signal->tty)) {
53691+ if (files != NULL)
53692+ put_files_struct(files);
53693+ continue;
53694+ }
53695+ rcu_read_lock();
53696+ fdt = files_fdtable(files);
53697+ for (i=0; i < fdt->max_fds; i++) {
53698+ file = fcheck_files(files, i);
53699+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53700+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53701+ p3 = task;
53702+ while (p3->pid > 0) {
53703+ if (p3 == p)
53704+ break;
53705+ p3 = p3->real_parent;
53706+ }
53707+ if (p3 == p)
53708+ break;
53709+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53710+ gr_handle_alertkill(p);
53711+ rcu_read_unlock();
53712+ put_files_struct(files);
53713+ read_unlock(&tasklist_lock);
53714+ fput(our_file);
53715+ return 0;
53716+ }
53717+ }
53718+ rcu_read_unlock();
53719+ put_files_struct(files);
53720+ } while_each_thread(p2, p);
53721+ read_unlock(&tasklist_lock);
53722+
53723+ fput(our_file);
53724+ return 1;
53725+}
53726+
53727+static int gr_rbac_disable(void *unused)
53728+{
53729+ pax_open_kernel();
53730+ gr_status &= ~GR_READY;
53731+ pax_close_kernel();
53732+
53733+ return 0;
53734+}
53735+
53736+ssize_t
53737+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53738+{
53739+ struct gr_arg_wrapper uwrap;
53740+ unsigned char *sprole_salt = NULL;
53741+ unsigned char *sprole_sum = NULL;
53742+ int error = sizeof (struct gr_arg_wrapper);
53743+ int error2 = 0;
53744+
53745+ mutex_lock(&gr_dev_mutex);
53746+
53747+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53748+ error = -EPERM;
53749+ goto out;
53750+ }
53751+
53752+ if (count != sizeof (struct gr_arg_wrapper)) {
53753+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53754+ error = -EINVAL;
53755+ goto out;
53756+ }
53757+
53758+
53759+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53760+ gr_auth_expires = 0;
53761+ gr_auth_attempts = 0;
53762+ }
53763+
53764+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53765+ error = -EFAULT;
53766+ goto out;
53767+ }
53768+
53769+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53770+ error = -EINVAL;
53771+ goto out;
53772+ }
53773+
53774+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53775+ error = -EFAULT;
53776+ goto out;
53777+ }
53778+
53779+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53780+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53781+ time_after(gr_auth_expires, get_seconds())) {
53782+ error = -EBUSY;
53783+ goto out;
53784+ }
53785+
53786+ /* if non-root trying to do anything other than use a special role,
53787+ do not attempt authentication, do not count towards authentication
53788+ locking
53789+ */
53790+
53791+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53792+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53793+ current_uid()) {
53794+ error = -EPERM;
53795+ goto out;
53796+ }
53797+
53798+ /* ensure pw and special role name are null terminated */
53799+
53800+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53801+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53802+
53803+ /* Okay.
53804+ * We have our enough of the argument structure..(we have yet
53805+ * to copy_from_user the tables themselves) . Copy the tables
53806+ * only if we need them, i.e. for loading operations. */
53807+
53808+ switch (gr_usermode->mode) {
53809+ case GR_STATUS:
53810+ if (gr_status & GR_READY) {
53811+ error = 1;
53812+ if (!gr_check_secure_terminal(current))
53813+ error = 3;
53814+ } else
53815+ error = 2;
53816+ goto out;
53817+ case GR_SHUTDOWN:
53818+ if ((gr_status & GR_READY)
53819+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53820+ stop_machine(gr_rbac_disable, NULL, NULL);
53821+ free_variables();
53822+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53823+ memset(gr_system_salt, 0, GR_SALT_LEN);
53824+ memset(gr_system_sum, 0, GR_SHA_LEN);
53825+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53826+ } else if (gr_status & GR_READY) {
53827+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53828+ error = -EPERM;
53829+ } else {
53830+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53831+ error = -EAGAIN;
53832+ }
53833+ break;
53834+ case GR_ENABLE:
53835+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53836+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53837+ else {
53838+ if (gr_status & GR_READY)
53839+ error = -EAGAIN;
53840+ else
53841+ error = error2;
53842+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53843+ }
53844+ break;
53845+ case GR_RELOAD:
53846+ if (!(gr_status & GR_READY)) {
53847+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53848+ error = -EAGAIN;
53849+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53850+ stop_machine(gr_rbac_disable, NULL, NULL);
53851+ free_variables();
53852+ error2 = gracl_init(gr_usermode);
53853+ if (!error2)
53854+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53855+ else {
53856+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53857+ error = error2;
53858+ }
53859+ } else {
53860+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53861+ error = -EPERM;
53862+ }
53863+ break;
53864+ case GR_SEGVMOD:
53865+ if (unlikely(!(gr_status & GR_READY))) {
53866+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53867+ error = -EAGAIN;
53868+ break;
53869+ }
53870+
53871+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53872+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53873+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53874+ struct acl_subject_label *segvacl;
53875+ segvacl =
53876+ lookup_acl_subj_label(gr_usermode->segv_inode,
53877+ gr_usermode->segv_device,
53878+ current->role);
53879+ if (segvacl) {
53880+ segvacl->crashes = 0;
53881+ segvacl->expires = 0;
53882+ }
53883+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53884+ gr_remove_uid(gr_usermode->segv_uid);
53885+ }
53886+ } else {
53887+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53888+ error = -EPERM;
53889+ }
53890+ break;
53891+ case GR_SPROLE:
53892+ case GR_SPROLEPAM:
53893+ if (unlikely(!(gr_status & GR_READY))) {
53894+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53895+ error = -EAGAIN;
53896+ break;
53897+ }
53898+
53899+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53900+ current->role->expires = 0;
53901+ current->role->auth_attempts = 0;
53902+ }
53903+
53904+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53905+ time_after(current->role->expires, get_seconds())) {
53906+ error = -EBUSY;
53907+ goto out;
53908+ }
53909+
53910+ if (lookup_special_role_auth
53911+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53912+ && ((!sprole_salt && !sprole_sum)
53913+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53914+ char *p = "";
53915+ assign_special_role(gr_usermode->sp_role);
53916+ read_lock(&tasklist_lock);
53917+ if (current->real_parent)
53918+ p = current->real_parent->role->rolename;
53919+ read_unlock(&tasklist_lock);
53920+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53921+ p, acl_sp_role_value);
53922+ } else {
53923+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53924+ error = -EPERM;
53925+ if(!(current->role->auth_attempts++))
53926+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53927+
53928+ goto out;
53929+ }
53930+ break;
53931+ case GR_UNSPROLE:
53932+ if (unlikely(!(gr_status & GR_READY))) {
53933+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53934+ error = -EAGAIN;
53935+ break;
53936+ }
53937+
53938+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53939+ char *p = "";
53940+ int i = 0;
53941+
53942+ read_lock(&tasklist_lock);
53943+ if (current->real_parent) {
53944+ p = current->real_parent->role->rolename;
53945+ i = current->real_parent->acl_role_id;
53946+ }
53947+ read_unlock(&tasklist_lock);
53948+
53949+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53950+ gr_set_acls(1);
53951+ } else {
53952+ error = -EPERM;
53953+ goto out;
53954+ }
53955+ break;
53956+ default:
53957+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53958+ error = -EINVAL;
53959+ break;
53960+ }
53961+
53962+ if (error != -EPERM)
53963+ goto out;
53964+
53965+ if(!(gr_auth_attempts++))
53966+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53967+
53968+ out:
53969+ mutex_unlock(&gr_dev_mutex);
53970+ return error;
53971+}
53972+
53973+/* must be called with
53974+ rcu_read_lock();
53975+ read_lock(&tasklist_lock);
53976+ read_lock(&grsec_exec_file_lock);
53977+*/
53978+int gr_apply_subject_to_task(struct task_struct *task)
53979+{
53980+ struct acl_object_label *obj;
53981+ char *tmpname;
53982+ struct acl_subject_label *tmpsubj;
53983+ struct file *filp;
53984+ struct name_entry *nmatch;
53985+
53986+ filp = task->exec_file;
53987+ if (filp == NULL)
53988+ return 0;
53989+
53990+ /* the following is to apply the correct subject
53991+ on binaries running when the RBAC system
53992+ is enabled, when the binaries have been
53993+ replaced or deleted since their execution
53994+ -----
53995+ when the RBAC system starts, the inode/dev
53996+ from exec_file will be one the RBAC system
53997+ is unaware of. It only knows the inode/dev
53998+ of the present file on disk, or the absence
53999+ of it.
54000+ */
54001+ preempt_disable();
54002+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54003+
54004+ nmatch = lookup_name_entry(tmpname);
54005+ preempt_enable();
54006+ tmpsubj = NULL;
54007+ if (nmatch) {
54008+ if (nmatch->deleted)
54009+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54010+ else
54011+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54012+ if (tmpsubj != NULL)
54013+ task->acl = tmpsubj;
54014+ }
54015+ if (tmpsubj == NULL)
54016+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54017+ task->role);
54018+ if (task->acl) {
54019+ task->is_writable = 0;
54020+ /* ignore additional mmap checks for processes that are writable
54021+ by the default ACL */
54022+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54023+ if (unlikely(obj->mode & GR_WRITE))
54024+ task->is_writable = 1;
54025+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54026+ if (unlikely(obj->mode & GR_WRITE))
54027+ task->is_writable = 1;
54028+
54029+ gr_set_proc_res(task);
54030+
54031+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54032+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54033+#endif
54034+ } else {
54035+ return 1;
54036+ }
54037+
54038+ return 0;
54039+}
54040+
54041+int
54042+gr_set_acls(const int type)
54043+{
54044+ struct task_struct *task, *task2;
54045+ struct acl_role_label *role = current->role;
54046+ __u16 acl_role_id = current->acl_role_id;
54047+ const struct cred *cred;
54048+ int ret;
54049+
54050+ rcu_read_lock();
54051+ read_lock(&tasklist_lock);
54052+ read_lock(&grsec_exec_file_lock);
54053+ do_each_thread(task2, task) {
54054+ /* check to see if we're called from the exit handler,
54055+ if so, only replace ACLs that have inherited the admin
54056+ ACL */
54057+
54058+ if (type && (task->role != role ||
54059+ task->acl_role_id != acl_role_id))
54060+ continue;
54061+
54062+ task->acl_role_id = 0;
54063+ task->acl_sp_role = 0;
54064+
54065+ if (task->exec_file) {
54066+ cred = __task_cred(task);
54067+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54068+ ret = gr_apply_subject_to_task(task);
54069+ if (ret) {
54070+ read_unlock(&grsec_exec_file_lock);
54071+ read_unlock(&tasklist_lock);
54072+ rcu_read_unlock();
54073+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54074+ return ret;
54075+ }
54076+ } else {
54077+ // it's a kernel process
54078+ task->role = kernel_role;
54079+ task->acl = kernel_role->root_label;
54080+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54081+ task->acl->mode &= ~GR_PROCFIND;
54082+#endif
54083+ }
54084+ } while_each_thread(task2, task);
54085+ read_unlock(&grsec_exec_file_lock);
54086+ read_unlock(&tasklist_lock);
54087+ rcu_read_unlock();
54088+
54089+ return 0;
54090+}
54091+
54092+void
54093+gr_learn_resource(const struct task_struct *task,
54094+ const int res, const unsigned long wanted, const int gt)
54095+{
54096+ struct acl_subject_label *acl;
54097+ const struct cred *cred;
54098+
54099+ if (unlikely((gr_status & GR_READY) &&
54100+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54101+ goto skip_reslog;
54102+
54103+#ifdef CONFIG_GRKERNSEC_RESLOG
54104+ gr_log_resource(task, res, wanted, gt);
54105+#endif
54106+ skip_reslog:
54107+
54108+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54109+ return;
54110+
54111+ acl = task->acl;
54112+
54113+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54114+ !(acl->resmask & (1 << (unsigned short) res))))
54115+ return;
54116+
54117+ if (wanted >= acl->res[res].rlim_cur) {
54118+ unsigned long res_add;
54119+
54120+ res_add = wanted;
54121+ switch (res) {
54122+ case RLIMIT_CPU:
54123+ res_add += GR_RLIM_CPU_BUMP;
54124+ break;
54125+ case RLIMIT_FSIZE:
54126+ res_add += GR_RLIM_FSIZE_BUMP;
54127+ break;
54128+ case RLIMIT_DATA:
54129+ res_add += GR_RLIM_DATA_BUMP;
54130+ break;
54131+ case RLIMIT_STACK:
54132+ res_add += GR_RLIM_STACK_BUMP;
54133+ break;
54134+ case RLIMIT_CORE:
54135+ res_add += GR_RLIM_CORE_BUMP;
54136+ break;
54137+ case RLIMIT_RSS:
54138+ res_add += GR_RLIM_RSS_BUMP;
54139+ break;
54140+ case RLIMIT_NPROC:
54141+ res_add += GR_RLIM_NPROC_BUMP;
54142+ break;
54143+ case RLIMIT_NOFILE:
54144+ res_add += GR_RLIM_NOFILE_BUMP;
54145+ break;
54146+ case RLIMIT_MEMLOCK:
54147+ res_add += GR_RLIM_MEMLOCK_BUMP;
54148+ break;
54149+ case RLIMIT_AS:
54150+ res_add += GR_RLIM_AS_BUMP;
54151+ break;
54152+ case RLIMIT_LOCKS:
54153+ res_add += GR_RLIM_LOCKS_BUMP;
54154+ break;
54155+ case RLIMIT_SIGPENDING:
54156+ res_add += GR_RLIM_SIGPENDING_BUMP;
54157+ break;
54158+ case RLIMIT_MSGQUEUE:
54159+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54160+ break;
54161+ case RLIMIT_NICE:
54162+ res_add += GR_RLIM_NICE_BUMP;
54163+ break;
54164+ case RLIMIT_RTPRIO:
54165+ res_add += GR_RLIM_RTPRIO_BUMP;
54166+ break;
54167+ case RLIMIT_RTTIME:
54168+ res_add += GR_RLIM_RTTIME_BUMP;
54169+ break;
54170+ }
54171+
54172+ acl->res[res].rlim_cur = res_add;
54173+
54174+ if (wanted > acl->res[res].rlim_max)
54175+ acl->res[res].rlim_max = res_add;
54176+
54177+ /* only log the subject filename, since resource logging is supported for
54178+ single-subject learning only */
54179+ rcu_read_lock();
54180+ cred = __task_cred(task);
54181+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54182+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54183+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54184+ "", (unsigned long) res, &task->signal->saved_ip);
54185+ rcu_read_unlock();
54186+ }
54187+
54188+ return;
54189+}
54190+
54191+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54192+void
54193+pax_set_initial_flags(struct linux_binprm *bprm)
54194+{
54195+ struct task_struct *task = current;
54196+ struct acl_subject_label *proc;
54197+ unsigned long flags;
54198+
54199+ if (unlikely(!(gr_status & GR_READY)))
54200+ return;
54201+
54202+ flags = pax_get_flags(task);
54203+
54204+ proc = task->acl;
54205+
54206+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54207+ flags &= ~MF_PAX_PAGEEXEC;
54208+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54209+ flags &= ~MF_PAX_SEGMEXEC;
54210+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54211+ flags &= ~MF_PAX_RANDMMAP;
54212+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54213+ flags &= ~MF_PAX_EMUTRAMP;
54214+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54215+ flags &= ~MF_PAX_MPROTECT;
54216+
54217+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54218+ flags |= MF_PAX_PAGEEXEC;
54219+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54220+ flags |= MF_PAX_SEGMEXEC;
54221+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54222+ flags |= MF_PAX_RANDMMAP;
54223+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54224+ flags |= MF_PAX_EMUTRAMP;
54225+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54226+ flags |= MF_PAX_MPROTECT;
54227+
54228+ pax_set_flags(task, flags);
54229+
54230+ return;
54231+}
54232+#endif
54233+
54234+int
54235+gr_handle_proc_ptrace(struct task_struct *task)
54236+{
54237+ struct file *filp;
54238+ struct task_struct *tmp = task;
54239+ struct task_struct *curtemp = current;
54240+ __u32 retmode;
54241+
54242+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54243+ if (unlikely(!(gr_status & GR_READY)))
54244+ return 0;
54245+#endif
54246+
54247+ read_lock(&tasklist_lock);
54248+ read_lock(&grsec_exec_file_lock);
54249+ filp = task->exec_file;
54250+
54251+ while (tmp->pid > 0) {
54252+ if (tmp == curtemp)
54253+ break;
54254+ tmp = tmp->real_parent;
54255+ }
54256+
54257+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54258+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54259+ read_unlock(&grsec_exec_file_lock);
54260+ read_unlock(&tasklist_lock);
54261+ return 1;
54262+ }
54263+
54264+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54265+ if (!(gr_status & GR_READY)) {
54266+ read_unlock(&grsec_exec_file_lock);
54267+ read_unlock(&tasklist_lock);
54268+ return 0;
54269+ }
54270+#endif
54271+
54272+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54273+ read_unlock(&grsec_exec_file_lock);
54274+ read_unlock(&tasklist_lock);
54275+
54276+ if (retmode & GR_NOPTRACE)
54277+ return 1;
54278+
54279+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54280+ && (current->acl != task->acl || (current->acl != current->role->root_label
54281+ && current->pid != task->pid)))
54282+ return 1;
54283+
54284+ return 0;
54285+}
54286+
54287+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54288+{
54289+ if (unlikely(!(gr_status & GR_READY)))
54290+ return;
54291+
54292+ if (!(current->role->roletype & GR_ROLE_GOD))
54293+ return;
54294+
54295+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54296+ p->role->rolename, gr_task_roletype_to_char(p),
54297+ p->acl->filename);
54298+}
54299+
54300+int
54301+gr_handle_ptrace(struct task_struct *task, const long request)
54302+{
54303+ struct task_struct *tmp = task;
54304+ struct task_struct *curtemp = current;
54305+ __u32 retmode;
54306+
54307+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54308+ if (unlikely(!(gr_status & GR_READY)))
54309+ return 0;
54310+#endif
54311+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54312+ read_lock(&tasklist_lock);
54313+ while (tmp->pid > 0) {
54314+ if (tmp == curtemp)
54315+ break;
54316+ tmp = tmp->real_parent;
54317+ }
54318+
54319+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54320+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54321+ read_unlock(&tasklist_lock);
54322+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54323+ return 1;
54324+ }
54325+ read_unlock(&tasklist_lock);
54326+ }
54327+
54328+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54329+ if (!(gr_status & GR_READY))
54330+ return 0;
54331+#endif
54332+
54333+ read_lock(&grsec_exec_file_lock);
54334+ if (unlikely(!task->exec_file)) {
54335+ read_unlock(&grsec_exec_file_lock);
54336+ return 0;
54337+ }
54338+
54339+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54340+ read_unlock(&grsec_exec_file_lock);
54341+
54342+ if (retmode & GR_NOPTRACE) {
54343+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54344+ return 1;
54345+ }
54346+
54347+ if (retmode & GR_PTRACERD) {
54348+ switch (request) {
54349+ case PTRACE_SEIZE:
54350+ case PTRACE_POKETEXT:
54351+ case PTRACE_POKEDATA:
54352+ case PTRACE_POKEUSR:
54353+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54354+ case PTRACE_SETREGS:
54355+ case PTRACE_SETFPREGS:
54356+#endif
54357+#ifdef CONFIG_X86
54358+ case PTRACE_SETFPXREGS:
54359+#endif
54360+#ifdef CONFIG_ALTIVEC
54361+ case PTRACE_SETVRREGS:
54362+#endif
54363+ return 1;
54364+ default:
54365+ return 0;
54366+ }
54367+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
54368+ !(current->role->roletype & GR_ROLE_GOD) &&
54369+ (current->acl != task->acl)) {
54370+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54371+ return 1;
54372+ }
54373+
54374+ return 0;
54375+}
54376+
54377+static int is_writable_mmap(const struct file *filp)
54378+{
54379+ struct task_struct *task = current;
54380+ struct acl_object_label *obj, *obj2;
54381+
54382+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54383+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54384+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54385+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54386+ task->role->root_label);
54387+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54388+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54389+ return 1;
54390+ }
54391+ }
54392+ return 0;
54393+}
54394+
54395+int
54396+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54397+{
54398+ __u32 mode;
54399+
54400+ if (unlikely(!file || !(prot & PROT_EXEC)))
54401+ return 1;
54402+
54403+ if (is_writable_mmap(file))
54404+ return 0;
54405+
54406+ mode =
54407+ gr_search_file(file->f_path.dentry,
54408+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54409+ file->f_path.mnt);
54410+
54411+ if (!gr_tpe_allow(file))
54412+ return 0;
54413+
54414+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54415+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54416+ return 0;
54417+ } else if (unlikely(!(mode & GR_EXEC))) {
54418+ return 0;
54419+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54420+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54421+ return 1;
54422+ }
54423+
54424+ return 1;
54425+}
54426+
54427+int
54428+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54429+{
54430+ __u32 mode;
54431+
54432+ if (unlikely(!file || !(prot & PROT_EXEC)))
54433+ return 1;
54434+
54435+ if (is_writable_mmap(file))
54436+ return 0;
54437+
54438+ mode =
54439+ gr_search_file(file->f_path.dentry,
54440+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54441+ file->f_path.mnt);
54442+
54443+ if (!gr_tpe_allow(file))
54444+ return 0;
54445+
54446+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54447+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54448+ return 0;
54449+ } else if (unlikely(!(mode & GR_EXEC))) {
54450+ return 0;
54451+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54452+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54453+ return 1;
54454+ }
54455+
54456+ return 1;
54457+}
54458+
54459+void
54460+gr_acl_handle_psacct(struct task_struct *task, const long code)
54461+{
54462+ unsigned long runtime;
54463+ unsigned long cputime;
54464+ unsigned int wday, cday;
54465+ __u8 whr, chr;
54466+ __u8 wmin, cmin;
54467+ __u8 wsec, csec;
54468+ struct timespec timeval;
54469+
54470+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54471+ !(task->acl->mode & GR_PROCACCT)))
54472+ return;
54473+
54474+ do_posix_clock_monotonic_gettime(&timeval);
54475+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54476+ wday = runtime / (3600 * 24);
54477+ runtime -= wday * (3600 * 24);
54478+ whr = runtime / 3600;
54479+ runtime -= whr * 3600;
54480+ wmin = runtime / 60;
54481+ runtime -= wmin * 60;
54482+ wsec = runtime;
54483+
54484+ cputime = (task->utime + task->stime) / HZ;
54485+ cday = cputime / (3600 * 24);
54486+ cputime -= cday * (3600 * 24);
54487+ chr = cputime / 3600;
54488+ cputime -= chr * 3600;
54489+ cmin = cputime / 60;
54490+ cputime -= cmin * 60;
54491+ csec = cputime;
54492+
54493+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54494+
54495+ return;
54496+}
54497+
54498+void gr_set_kernel_label(struct task_struct *task)
54499+{
54500+ if (gr_status & GR_READY) {
54501+ task->role = kernel_role;
54502+ task->acl = kernel_role->root_label;
54503+ }
54504+ return;
54505+}
54506+
54507+#ifdef CONFIG_TASKSTATS
54508+int gr_is_taskstats_denied(int pid)
54509+{
54510+ struct task_struct *task;
54511+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54512+ const struct cred *cred;
54513+#endif
54514+ int ret = 0;
54515+
54516+ /* restrict taskstats viewing to un-chrooted root users
54517+ who have the 'view' subject flag if the RBAC system is enabled
54518+ */
54519+
54520+ rcu_read_lock();
54521+ read_lock(&tasklist_lock);
54522+ task = find_task_by_vpid(pid);
54523+ if (task) {
54524+#ifdef CONFIG_GRKERNSEC_CHROOT
54525+ if (proc_is_chrooted(task))
54526+ ret = -EACCES;
54527+#endif
54528+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54529+ cred = __task_cred(task);
54530+#ifdef CONFIG_GRKERNSEC_PROC_USER
54531+ if (cred->uid != 0)
54532+ ret = -EACCES;
54533+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54534+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54535+ ret = -EACCES;
54536+#endif
54537+#endif
54538+ if (gr_status & GR_READY) {
54539+ if (!(task->acl->mode & GR_VIEW))
54540+ ret = -EACCES;
54541+ }
54542+ } else
54543+ ret = -ENOENT;
54544+
54545+ read_unlock(&tasklist_lock);
54546+ rcu_read_unlock();
54547+
54548+ return ret;
54549+}
54550+#endif
54551+
54552+/* AUXV entries are filled via a descendant of search_binary_handler
54553+ after we've already applied the subject for the target
54554+*/
54555+int gr_acl_enable_at_secure(void)
54556+{
54557+ if (unlikely(!(gr_status & GR_READY)))
54558+ return 0;
54559+
54560+ if (current->acl->mode & GR_ATSECURE)
54561+ return 1;
54562+
54563+ return 0;
54564+}
54565+
54566+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54567+{
54568+ struct task_struct *task = current;
54569+ struct dentry *dentry = file->f_path.dentry;
54570+ struct vfsmount *mnt = file->f_path.mnt;
54571+ struct acl_object_label *obj, *tmp;
54572+ struct acl_subject_label *subj;
54573+ unsigned int bufsize;
54574+ int is_not_root;
54575+ char *path;
54576+ dev_t dev = __get_dev(dentry);
54577+
54578+ if (unlikely(!(gr_status & GR_READY)))
54579+ return 1;
54580+
54581+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54582+ return 1;
54583+
54584+ /* ignore Eric Biederman */
54585+ if (IS_PRIVATE(dentry->d_inode))
54586+ return 1;
54587+
54588+ subj = task->acl;
54589+ read_lock(&gr_inode_lock);
54590+ do {
54591+ obj = lookup_acl_obj_label(ino, dev, subj);
54592+ if (obj != NULL) {
54593+ read_unlock(&gr_inode_lock);
54594+ return (obj->mode & GR_FIND) ? 1 : 0;
54595+ }
54596+ } while ((subj = subj->parent_subject));
54597+ read_unlock(&gr_inode_lock);
54598+
54599+ /* this is purely an optimization since we're looking for an object
54600+ for the directory we're doing a readdir on
54601+ if it's possible for any globbed object to match the entry we're
54602+ filling into the directory, then the object we find here will be
54603+ an anchor point with attached globbed objects
54604+ */
54605+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54606+ if (obj->globbed == NULL)
54607+ return (obj->mode & GR_FIND) ? 1 : 0;
54608+
54609+ is_not_root = ((obj->filename[0] == '/') &&
54610+ (obj->filename[1] == '\0')) ? 0 : 1;
54611+ bufsize = PAGE_SIZE - namelen - is_not_root;
54612+
54613+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54614+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54615+ return 1;
54616+
54617+ preempt_disable();
54618+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54619+ bufsize);
54620+
54621+ bufsize = strlen(path);
54622+
54623+ /* if base is "/", don't append an additional slash */
54624+ if (is_not_root)
54625+ *(path + bufsize) = '/';
54626+ memcpy(path + bufsize + is_not_root, name, namelen);
54627+ *(path + bufsize + namelen + is_not_root) = '\0';
54628+
54629+ tmp = obj->globbed;
54630+ while (tmp) {
54631+ if (!glob_match(tmp->filename, path)) {
54632+ preempt_enable();
54633+ return (tmp->mode & GR_FIND) ? 1 : 0;
54634+ }
54635+ tmp = tmp->next;
54636+ }
54637+ preempt_enable();
54638+ return (obj->mode & GR_FIND) ? 1 : 0;
54639+}
54640+
54641+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54642+EXPORT_SYMBOL(gr_acl_is_enabled);
54643+#endif
54644+EXPORT_SYMBOL(gr_learn_resource);
54645+EXPORT_SYMBOL(gr_set_kernel_label);
54646+#ifdef CONFIG_SECURITY
54647+EXPORT_SYMBOL(gr_check_user_change);
54648+EXPORT_SYMBOL(gr_check_group_change);
54649+#endif
54650+
54651diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54652new file mode 100644
54653index 0000000..34fefda
54654--- /dev/null
54655+++ b/grsecurity/gracl_alloc.c
54656@@ -0,0 +1,105 @@
54657+#include <linux/kernel.h>
54658+#include <linux/mm.h>
54659+#include <linux/slab.h>
54660+#include <linux/vmalloc.h>
54661+#include <linux/gracl.h>
54662+#include <linux/grsecurity.h>
54663+
54664+static unsigned long alloc_stack_next = 1;
54665+static unsigned long alloc_stack_size = 1;
54666+static void **alloc_stack;
54667+
54668+static __inline__ int
54669+alloc_pop(void)
54670+{
54671+ if (alloc_stack_next == 1)
54672+ return 0;
54673+
54674+ kfree(alloc_stack[alloc_stack_next - 2]);
54675+
54676+ alloc_stack_next--;
54677+
54678+ return 1;
54679+}
54680+
54681+static __inline__ int
54682+alloc_push(void *buf)
54683+{
54684+ if (alloc_stack_next >= alloc_stack_size)
54685+ return 1;
54686+
54687+ alloc_stack[alloc_stack_next - 1] = buf;
54688+
54689+ alloc_stack_next++;
54690+
54691+ return 0;
54692+}
54693+
54694+void *
54695+acl_alloc(unsigned long len)
54696+{
54697+ void *ret = NULL;
54698+
54699+ if (!len || len > PAGE_SIZE)
54700+ goto out;
54701+
54702+ ret = kmalloc(len, GFP_KERNEL);
54703+
54704+ if (ret) {
54705+ if (alloc_push(ret)) {
54706+ kfree(ret);
54707+ ret = NULL;
54708+ }
54709+ }
54710+
54711+out:
54712+ return ret;
54713+}
54714+
54715+void *
54716+acl_alloc_num(unsigned long num, unsigned long len)
54717+{
54718+ if (!len || (num > (PAGE_SIZE / len)))
54719+ return NULL;
54720+
54721+ return acl_alloc(num * len);
54722+}
54723+
54724+void
54725+acl_free_all(void)
54726+{
54727+ if (gr_acl_is_enabled() || !alloc_stack)
54728+ return;
54729+
54730+ while (alloc_pop()) ;
54731+
54732+ if (alloc_stack) {
54733+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54734+ kfree(alloc_stack);
54735+ else
54736+ vfree(alloc_stack);
54737+ }
54738+
54739+ alloc_stack = NULL;
54740+ alloc_stack_size = 1;
54741+ alloc_stack_next = 1;
54742+
54743+ return;
54744+}
54745+
54746+int
54747+acl_alloc_stack_init(unsigned long size)
54748+{
54749+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54750+ alloc_stack =
54751+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54752+ else
54753+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54754+
54755+ alloc_stack_size = size;
54756+
54757+ if (!alloc_stack)
54758+ return 0;
54759+ else
54760+ return 1;
54761+}
54762diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54763new file mode 100644
54764index 0000000..6d21049
54765--- /dev/null
54766+++ b/grsecurity/gracl_cap.c
54767@@ -0,0 +1,110 @@
54768+#include <linux/kernel.h>
54769+#include <linux/module.h>
54770+#include <linux/sched.h>
54771+#include <linux/gracl.h>
54772+#include <linux/grsecurity.h>
54773+#include <linux/grinternal.h>
54774+
54775+extern const char *captab_log[];
54776+extern int captab_log_entries;
54777+
54778+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54779+{
54780+ struct acl_subject_label *curracl;
54781+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54782+ kernel_cap_t cap_audit = __cap_empty_set;
54783+
54784+ if (!gr_acl_is_enabled())
54785+ return 1;
54786+
54787+ curracl = task->acl;
54788+
54789+ cap_drop = curracl->cap_lower;
54790+ cap_mask = curracl->cap_mask;
54791+ cap_audit = curracl->cap_invert_audit;
54792+
54793+ while ((curracl = curracl->parent_subject)) {
54794+ /* if the cap isn't specified in the current computed mask but is specified in the
54795+ current level subject, and is lowered in the current level subject, then add
54796+ it to the set of dropped capabilities
54797+ otherwise, add the current level subject's mask to the current computed mask
54798+ */
54799+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54800+ cap_raise(cap_mask, cap);
54801+ if (cap_raised(curracl->cap_lower, cap))
54802+ cap_raise(cap_drop, cap);
54803+ if (cap_raised(curracl->cap_invert_audit, cap))
54804+ cap_raise(cap_audit, cap);
54805+ }
54806+ }
54807+
54808+ if (!cap_raised(cap_drop, cap)) {
54809+ if (cap_raised(cap_audit, cap))
54810+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54811+ return 1;
54812+ }
54813+
54814+ curracl = task->acl;
54815+
54816+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54817+ && cap_raised(cred->cap_effective, cap)) {
54818+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54819+ task->role->roletype, cred->uid,
54820+ cred->gid, task->exec_file ?
54821+ gr_to_filename(task->exec_file->f_path.dentry,
54822+ task->exec_file->f_path.mnt) : curracl->filename,
54823+ curracl->filename, 0UL,
54824+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54825+ return 1;
54826+ }
54827+
54828+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54829+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54830+
54831+ return 0;
54832+}
54833+
54834+int
54835+gr_acl_is_capable(const int cap)
54836+{
54837+ return gr_task_acl_is_capable(current, current_cred(), cap);
54838+}
54839+
54840+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54841+{
54842+ struct acl_subject_label *curracl;
54843+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54844+
54845+ if (!gr_acl_is_enabled())
54846+ return 1;
54847+
54848+ curracl = task->acl;
54849+
54850+ cap_drop = curracl->cap_lower;
54851+ cap_mask = curracl->cap_mask;
54852+
54853+ while ((curracl = curracl->parent_subject)) {
54854+ /* if the cap isn't specified in the current computed mask but is specified in the
54855+ current level subject, and is lowered in the current level subject, then add
54856+ it to the set of dropped capabilities
54857+ otherwise, add the current level subject's mask to the current computed mask
54858+ */
54859+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54860+ cap_raise(cap_mask, cap);
54861+ if (cap_raised(curracl->cap_lower, cap))
54862+ cap_raise(cap_drop, cap);
54863+ }
54864+ }
54865+
54866+ if (!cap_raised(cap_drop, cap))
54867+ return 1;
54868+
54869+ return 0;
54870+}
54871+
54872+int
54873+gr_acl_is_capable_nolog(const int cap)
54874+{
54875+ return gr_task_acl_is_capable_nolog(current, cap);
54876+}
54877+
54878diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54879new file mode 100644
54880index 0000000..88d0e87
54881--- /dev/null
54882+++ b/grsecurity/gracl_fs.c
54883@@ -0,0 +1,435 @@
54884+#include <linux/kernel.h>
54885+#include <linux/sched.h>
54886+#include <linux/types.h>
54887+#include <linux/fs.h>
54888+#include <linux/file.h>
54889+#include <linux/stat.h>
54890+#include <linux/grsecurity.h>
54891+#include <linux/grinternal.h>
54892+#include <linux/gracl.h>
54893+
54894+umode_t
54895+gr_acl_umask(void)
54896+{
54897+ if (unlikely(!gr_acl_is_enabled()))
54898+ return 0;
54899+
54900+ return current->role->umask;
54901+}
54902+
54903+__u32
54904+gr_acl_handle_hidden_file(const struct dentry * dentry,
54905+ const struct vfsmount * mnt)
54906+{
54907+ __u32 mode;
54908+
54909+ if (unlikely(!dentry->d_inode))
54910+ return GR_FIND;
54911+
54912+ mode =
54913+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54914+
54915+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54916+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54917+ return mode;
54918+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54919+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54920+ return 0;
54921+ } else if (unlikely(!(mode & GR_FIND)))
54922+ return 0;
54923+
54924+ return GR_FIND;
54925+}
54926+
54927+__u32
54928+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54929+ int acc_mode)
54930+{
54931+ __u32 reqmode = GR_FIND;
54932+ __u32 mode;
54933+
54934+ if (unlikely(!dentry->d_inode))
54935+ return reqmode;
54936+
54937+ if (acc_mode & MAY_APPEND)
54938+ reqmode |= GR_APPEND;
54939+ else if (acc_mode & MAY_WRITE)
54940+ reqmode |= GR_WRITE;
54941+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54942+ reqmode |= GR_READ;
54943+
54944+ mode =
54945+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54946+ mnt);
54947+
54948+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54949+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54950+ reqmode & GR_READ ? " reading" : "",
54951+ reqmode & GR_WRITE ? " writing" : reqmode &
54952+ GR_APPEND ? " appending" : "");
54953+ return reqmode;
54954+ } else
54955+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54956+ {
54957+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54958+ reqmode & GR_READ ? " reading" : "",
54959+ reqmode & GR_WRITE ? " writing" : reqmode &
54960+ GR_APPEND ? " appending" : "");
54961+ return 0;
54962+ } else if (unlikely((mode & reqmode) != reqmode))
54963+ return 0;
54964+
54965+ return reqmode;
54966+}
54967+
54968+__u32
54969+gr_acl_handle_creat(const struct dentry * dentry,
54970+ const struct dentry * p_dentry,
54971+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54972+ const int imode)
54973+{
54974+ __u32 reqmode = GR_WRITE | GR_CREATE;
54975+ __u32 mode;
54976+
54977+ if (acc_mode & MAY_APPEND)
54978+ reqmode |= GR_APPEND;
54979+ // if a directory was required or the directory already exists, then
54980+ // don't count this open as a read
54981+ if ((acc_mode & MAY_READ) &&
54982+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54983+ reqmode |= GR_READ;
54984+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54985+ reqmode |= GR_SETID;
54986+
54987+ mode =
54988+ gr_check_create(dentry, p_dentry, p_mnt,
54989+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54990+
54991+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54992+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54993+ reqmode & GR_READ ? " reading" : "",
54994+ reqmode & GR_WRITE ? " writing" : reqmode &
54995+ GR_APPEND ? " appending" : "");
54996+ return reqmode;
54997+ } else
54998+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54999+ {
55000+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55001+ reqmode & GR_READ ? " reading" : "",
55002+ reqmode & GR_WRITE ? " writing" : reqmode &
55003+ GR_APPEND ? " appending" : "");
55004+ return 0;
55005+ } else if (unlikely((mode & reqmode) != reqmode))
55006+ return 0;
55007+
55008+ return reqmode;
55009+}
55010+
55011+__u32
55012+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55013+ const int fmode)
55014+{
55015+ __u32 mode, reqmode = GR_FIND;
55016+
55017+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55018+ reqmode |= GR_EXEC;
55019+ if (fmode & S_IWOTH)
55020+ reqmode |= GR_WRITE;
55021+ if (fmode & S_IROTH)
55022+ reqmode |= GR_READ;
55023+
55024+ mode =
55025+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55026+ mnt);
55027+
55028+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55029+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55030+ reqmode & GR_READ ? " reading" : "",
55031+ reqmode & GR_WRITE ? " writing" : "",
55032+ reqmode & GR_EXEC ? " executing" : "");
55033+ return reqmode;
55034+ } else
55035+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55036+ {
55037+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55038+ reqmode & GR_READ ? " reading" : "",
55039+ reqmode & GR_WRITE ? " writing" : "",
55040+ reqmode & GR_EXEC ? " executing" : "");
55041+ return 0;
55042+ } else if (unlikely((mode & reqmode) != reqmode))
55043+ return 0;
55044+
55045+ return reqmode;
55046+}
55047+
55048+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55049+{
55050+ __u32 mode;
55051+
55052+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55053+
55054+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55055+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55056+ return mode;
55057+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55058+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55059+ return 0;
55060+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55061+ return 0;
55062+
55063+ return (reqmode);
55064+}
55065+
55066+__u32
55067+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55068+{
55069+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55070+}
55071+
55072+__u32
55073+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55074+{
55075+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55076+}
55077+
55078+__u32
55079+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55080+{
55081+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55082+}
55083+
55084+__u32
55085+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55086+{
55087+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55088+}
55089+
55090+__u32
55091+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55092+ umode_t *modeptr)
55093+{
55094+ umode_t mode;
55095+
55096+ *modeptr &= ~gr_acl_umask();
55097+ mode = *modeptr;
55098+
55099+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55100+ return 1;
55101+
55102+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
55103+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55104+ GR_CHMOD_ACL_MSG);
55105+ } else {
55106+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55107+ }
55108+}
55109+
55110+__u32
55111+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55112+{
55113+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55114+}
55115+
55116+__u32
55117+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55118+{
55119+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55120+}
55121+
55122+__u32
55123+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55124+{
55125+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55126+}
55127+
55128+__u32
55129+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55130+{
55131+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55132+ GR_UNIXCONNECT_ACL_MSG);
55133+}
55134+
55135+/* hardlinks require at minimum create and link permission,
55136+ any additional privilege required is based on the
55137+ privilege of the file being linked to
55138+*/
55139+__u32
55140+gr_acl_handle_link(const struct dentry * new_dentry,
55141+ const struct dentry * parent_dentry,
55142+ const struct vfsmount * parent_mnt,
55143+ const struct dentry * old_dentry,
55144+ const struct vfsmount * old_mnt, const char *to)
55145+{
55146+ __u32 mode;
55147+ __u32 needmode = GR_CREATE | GR_LINK;
55148+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55149+
55150+ mode =
55151+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55152+ old_mnt);
55153+
55154+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55155+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55156+ return mode;
55157+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55158+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55159+ return 0;
55160+ } else if (unlikely((mode & needmode) != needmode))
55161+ return 0;
55162+
55163+ return 1;
55164+}
55165+
55166+__u32
55167+gr_acl_handle_symlink(const struct dentry * new_dentry,
55168+ const struct dentry * parent_dentry,
55169+ const struct vfsmount * parent_mnt, const char *from)
55170+{
55171+ __u32 needmode = GR_WRITE | GR_CREATE;
55172+ __u32 mode;
55173+
55174+ mode =
55175+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55176+ GR_CREATE | GR_AUDIT_CREATE |
55177+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55178+
55179+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55180+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55181+ return mode;
55182+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55183+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55184+ return 0;
55185+ } else if (unlikely((mode & needmode) != needmode))
55186+ return 0;
55187+
55188+ return (GR_WRITE | GR_CREATE);
55189+}
55190+
55191+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55192+{
55193+ __u32 mode;
55194+
55195+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55196+
55197+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55198+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55199+ return mode;
55200+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55201+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55202+ return 0;
55203+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55204+ return 0;
55205+
55206+ return (reqmode);
55207+}
55208+
55209+__u32
55210+gr_acl_handle_mknod(const struct dentry * new_dentry,
55211+ const struct dentry * parent_dentry,
55212+ const struct vfsmount * parent_mnt,
55213+ const int mode)
55214+{
55215+ __u32 reqmode = GR_WRITE | GR_CREATE;
55216+ if (unlikely(mode & (S_ISUID | S_ISGID)))
55217+ reqmode |= GR_SETID;
55218+
55219+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55220+ reqmode, GR_MKNOD_ACL_MSG);
55221+}
55222+
55223+__u32
55224+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55225+ const struct dentry *parent_dentry,
55226+ const struct vfsmount *parent_mnt)
55227+{
55228+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55229+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55230+}
55231+
55232+#define RENAME_CHECK_SUCCESS(old, new) \
55233+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55234+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55235+
55236+int
55237+gr_acl_handle_rename(struct dentry *new_dentry,
55238+ struct dentry *parent_dentry,
55239+ const struct vfsmount *parent_mnt,
55240+ struct dentry *old_dentry,
55241+ struct inode *old_parent_inode,
55242+ struct vfsmount *old_mnt, const char *newname)
55243+{
55244+ __u32 comp1, comp2;
55245+ int error = 0;
55246+
55247+ if (unlikely(!gr_acl_is_enabled()))
55248+ return 0;
55249+
55250+ if (!new_dentry->d_inode) {
55251+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55252+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55253+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55254+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55255+ GR_DELETE | GR_AUDIT_DELETE |
55256+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55257+ GR_SUPPRESS, old_mnt);
55258+ } else {
55259+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55260+ GR_CREATE | GR_DELETE |
55261+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55262+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55263+ GR_SUPPRESS, parent_mnt);
55264+ comp2 =
55265+ gr_search_file(old_dentry,
55266+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55267+ GR_DELETE | GR_AUDIT_DELETE |
55268+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55269+ }
55270+
55271+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55272+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55273+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55274+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55275+ && !(comp2 & GR_SUPPRESS)) {
55276+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55277+ error = -EACCES;
55278+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55279+ error = -EACCES;
55280+
55281+ return error;
55282+}
55283+
55284+void
55285+gr_acl_handle_exit(void)
55286+{
55287+ u16 id;
55288+ char *rolename;
55289+ struct file *exec_file;
55290+
55291+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55292+ !(current->role->roletype & GR_ROLE_PERSIST))) {
55293+ id = current->acl_role_id;
55294+ rolename = current->role->rolename;
55295+ gr_set_acls(1);
55296+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55297+ }
55298+
55299+ write_lock(&grsec_exec_file_lock);
55300+ exec_file = current->exec_file;
55301+ current->exec_file = NULL;
55302+ write_unlock(&grsec_exec_file_lock);
55303+
55304+ if (exec_file)
55305+ fput(exec_file);
55306+}
55307+
55308+int
55309+gr_acl_handle_procpidmem(const struct task_struct *task)
55310+{
55311+ if (unlikely(!gr_acl_is_enabled()))
55312+ return 0;
55313+
55314+ if (task != current && task->acl->mode & GR_PROTPROCFD)
55315+ return -EACCES;
55316+
55317+ return 0;
55318+}
55319diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55320new file mode 100644
55321index 0000000..58800a7
55322--- /dev/null
55323+++ b/grsecurity/gracl_ip.c
55324@@ -0,0 +1,384 @@
55325+#include <linux/kernel.h>
55326+#include <asm/uaccess.h>
55327+#include <asm/errno.h>
55328+#include <net/sock.h>
55329+#include <linux/file.h>
55330+#include <linux/fs.h>
55331+#include <linux/net.h>
55332+#include <linux/in.h>
55333+#include <linux/skbuff.h>
55334+#include <linux/ip.h>
55335+#include <linux/udp.h>
55336+#include <linux/types.h>
55337+#include <linux/sched.h>
55338+#include <linux/netdevice.h>
55339+#include <linux/inetdevice.h>
55340+#include <linux/gracl.h>
55341+#include <linux/grsecurity.h>
55342+#include <linux/grinternal.h>
55343+
55344+#define GR_BIND 0x01
55345+#define GR_CONNECT 0x02
55346+#define GR_INVERT 0x04
55347+#define GR_BINDOVERRIDE 0x08
55348+#define GR_CONNECTOVERRIDE 0x10
55349+#define GR_SOCK_FAMILY 0x20
55350+
55351+static const char * gr_protocols[IPPROTO_MAX] = {
55352+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55353+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55354+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55355+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55356+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55357+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55358+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55359+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55360+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55361+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55362+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55363+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55364+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55365+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55366+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55367+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55368+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55369+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55370+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55371+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55372+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55373+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55374+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55375+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55376+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55377+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55378+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55379+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55380+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55381+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55382+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55383+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55384+ };
55385+
55386+static const char * gr_socktypes[SOCK_MAX] = {
55387+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55388+ "unknown:7", "unknown:8", "unknown:9", "packet"
55389+ };
55390+
55391+static const char * gr_sockfamilies[AF_MAX+1] = {
55392+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55393+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55394+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55395+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55396+ };
55397+
55398+const char *
55399+gr_proto_to_name(unsigned char proto)
55400+{
55401+ return gr_protocols[proto];
55402+}
55403+
55404+const char *
55405+gr_socktype_to_name(unsigned char type)
55406+{
55407+ return gr_socktypes[type];
55408+}
55409+
55410+const char *
55411+gr_sockfamily_to_name(unsigned char family)
55412+{
55413+ return gr_sockfamilies[family];
55414+}
55415+
55416+int
55417+gr_search_socket(const int domain, const int type, const int protocol)
55418+{
55419+ struct acl_subject_label *curr;
55420+ const struct cred *cred = current_cred();
55421+
55422+ if (unlikely(!gr_acl_is_enabled()))
55423+ goto exit;
55424+
55425+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
55426+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55427+ goto exit; // let the kernel handle it
55428+
55429+ curr = current->acl;
55430+
55431+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55432+ /* the family is allowed, if this is PF_INET allow it only if
55433+ the extra sock type/protocol checks pass */
55434+ if (domain == PF_INET)
55435+ goto inet_check;
55436+ goto exit;
55437+ } else {
55438+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55439+ __u32 fakeip = 0;
55440+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55441+ current->role->roletype, cred->uid,
55442+ cred->gid, current->exec_file ?
55443+ gr_to_filename(current->exec_file->f_path.dentry,
55444+ current->exec_file->f_path.mnt) :
55445+ curr->filename, curr->filename,
55446+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55447+ &current->signal->saved_ip);
55448+ goto exit;
55449+ }
55450+ goto exit_fail;
55451+ }
55452+
55453+inet_check:
55454+ /* the rest of this checking is for IPv4 only */
55455+ if (!curr->ips)
55456+ goto exit;
55457+
55458+ if ((curr->ip_type & (1 << type)) &&
55459+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55460+ goto exit;
55461+
55462+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55463+ /* we don't place acls on raw sockets , and sometimes
55464+ dgram/ip sockets are opened for ioctl and not
55465+ bind/connect, so we'll fake a bind learn log */
55466+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55467+ __u32 fakeip = 0;
55468+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55469+ current->role->roletype, cred->uid,
55470+ cred->gid, current->exec_file ?
55471+ gr_to_filename(current->exec_file->f_path.dentry,
55472+ current->exec_file->f_path.mnt) :
55473+ curr->filename, curr->filename,
55474+ &fakeip, 0, type,
55475+ protocol, GR_CONNECT, &current->signal->saved_ip);
55476+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55477+ __u32 fakeip = 0;
55478+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55479+ current->role->roletype, cred->uid,
55480+ cred->gid, current->exec_file ?
55481+ gr_to_filename(current->exec_file->f_path.dentry,
55482+ current->exec_file->f_path.mnt) :
55483+ curr->filename, curr->filename,
55484+ &fakeip, 0, type,
55485+ protocol, GR_BIND, &current->signal->saved_ip);
55486+ }
55487+ /* we'll log when they use connect or bind */
55488+ goto exit;
55489+ }
55490+
55491+exit_fail:
55492+ if (domain == PF_INET)
55493+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55494+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55495+ else
55496+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55497+ gr_socktype_to_name(type), protocol);
55498+
55499+ return 0;
55500+exit:
55501+ return 1;
55502+}
55503+
55504+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55505+{
55506+ if ((ip->mode & mode) &&
55507+ (ip_port >= ip->low) &&
55508+ (ip_port <= ip->high) &&
55509+ ((ntohl(ip_addr) & our_netmask) ==
55510+ (ntohl(our_addr) & our_netmask))
55511+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55512+ && (ip->type & (1 << type))) {
55513+ if (ip->mode & GR_INVERT)
55514+ return 2; // specifically denied
55515+ else
55516+ return 1; // allowed
55517+ }
55518+
55519+ return 0; // not specifically allowed, may continue parsing
55520+}
55521+
55522+static int
55523+gr_search_connectbind(const int full_mode, struct sock *sk,
55524+ struct sockaddr_in *addr, const int type)
55525+{
55526+ char iface[IFNAMSIZ] = {0};
55527+ struct acl_subject_label *curr;
55528+ struct acl_ip_label *ip;
55529+ struct inet_sock *isk;
55530+ struct net_device *dev;
55531+ struct in_device *idev;
55532+ unsigned long i;
55533+ int ret;
55534+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55535+ __u32 ip_addr = 0;
55536+ __u32 our_addr;
55537+ __u32 our_netmask;
55538+ char *p;
55539+ __u16 ip_port = 0;
55540+ const struct cred *cred = current_cred();
55541+
55542+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55543+ return 0;
55544+
55545+ curr = current->acl;
55546+ isk = inet_sk(sk);
55547+
55548+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55549+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55550+ addr->sin_addr.s_addr = curr->inaddr_any_override;
55551+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55552+ struct sockaddr_in saddr;
55553+ int err;
55554+
55555+ saddr.sin_family = AF_INET;
55556+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
55557+ saddr.sin_port = isk->inet_sport;
55558+
55559+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55560+ if (err)
55561+ return err;
55562+
55563+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55564+ if (err)
55565+ return err;
55566+ }
55567+
55568+ if (!curr->ips)
55569+ return 0;
55570+
55571+ ip_addr = addr->sin_addr.s_addr;
55572+ ip_port = ntohs(addr->sin_port);
55573+
55574+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55575+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55576+ current->role->roletype, cred->uid,
55577+ cred->gid, current->exec_file ?
55578+ gr_to_filename(current->exec_file->f_path.dentry,
55579+ current->exec_file->f_path.mnt) :
55580+ curr->filename, curr->filename,
55581+ &ip_addr, ip_port, type,
55582+ sk->sk_protocol, mode, &current->signal->saved_ip);
55583+ return 0;
55584+ }
55585+
55586+ for (i = 0; i < curr->ip_num; i++) {
55587+ ip = *(curr->ips + i);
55588+ if (ip->iface != NULL) {
55589+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55590+ p = strchr(iface, ':');
55591+ if (p != NULL)
55592+ *p = '\0';
55593+ dev = dev_get_by_name(sock_net(sk), iface);
55594+ if (dev == NULL)
55595+ continue;
55596+ idev = in_dev_get(dev);
55597+ if (idev == NULL) {
55598+ dev_put(dev);
55599+ continue;
55600+ }
55601+ rcu_read_lock();
55602+ for_ifa(idev) {
55603+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55604+ our_addr = ifa->ifa_address;
55605+ our_netmask = 0xffffffff;
55606+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55607+ if (ret == 1) {
55608+ rcu_read_unlock();
55609+ in_dev_put(idev);
55610+ dev_put(dev);
55611+ return 0;
55612+ } else if (ret == 2) {
55613+ rcu_read_unlock();
55614+ in_dev_put(idev);
55615+ dev_put(dev);
55616+ goto denied;
55617+ }
55618+ }
55619+ } endfor_ifa(idev);
55620+ rcu_read_unlock();
55621+ in_dev_put(idev);
55622+ dev_put(dev);
55623+ } else {
55624+ our_addr = ip->addr;
55625+ our_netmask = ip->netmask;
55626+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55627+ if (ret == 1)
55628+ return 0;
55629+ else if (ret == 2)
55630+ goto denied;
55631+ }
55632+ }
55633+
55634+denied:
55635+ if (mode == GR_BIND)
55636+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55637+ else if (mode == GR_CONNECT)
55638+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55639+
55640+ return -EACCES;
55641+}
55642+
55643+int
55644+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55645+{
55646+ /* always allow disconnection of dgram sockets with connect */
55647+ if (addr->sin_family == AF_UNSPEC)
55648+ return 0;
55649+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55650+}
55651+
55652+int
55653+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55654+{
55655+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55656+}
55657+
55658+int gr_search_listen(struct socket *sock)
55659+{
55660+ struct sock *sk = sock->sk;
55661+ struct sockaddr_in addr;
55662+
55663+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55664+ addr.sin_port = inet_sk(sk)->inet_sport;
55665+
55666+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55667+}
55668+
55669+int gr_search_accept(struct socket *sock)
55670+{
55671+ struct sock *sk = sock->sk;
55672+ struct sockaddr_in addr;
55673+
55674+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55675+ addr.sin_port = inet_sk(sk)->inet_sport;
55676+
55677+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55678+}
55679+
55680+int
55681+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55682+{
55683+ if (addr)
55684+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55685+ else {
55686+ struct sockaddr_in sin;
55687+ const struct inet_sock *inet = inet_sk(sk);
55688+
55689+ sin.sin_addr.s_addr = inet->inet_daddr;
55690+ sin.sin_port = inet->inet_dport;
55691+
55692+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55693+ }
55694+}
55695+
55696+int
55697+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55698+{
55699+ struct sockaddr_in sin;
55700+
55701+ if (unlikely(skb->len < sizeof (struct udphdr)))
55702+ return 0; // skip this packet
55703+
55704+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55705+ sin.sin_port = udp_hdr(skb)->source;
55706+
55707+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55708+}
55709diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55710new file mode 100644
55711index 0000000..25f54ef
55712--- /dev/null
55713+++ b/grsecurity/gracl_learn.c
55714@@ -0,0 +1,207 @@
55715+#include <linux/kernel.h>
55716+#include <linux/mm.h>
55717+#include <linux/sched.h>
55718+#include <linux/poll.h>
55719+#include <linux/string.h>
55720+#include <linux/file.h>
55721+#include <linux/types.h>
55722+#include <linux/vmalloc.h>
55723+#include <linux/grinternal.h>
55724+
55725+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55726+ size_t count, loff_t *ppos);
55727+extern int gr_acl_is_enabled(void);
55728+
55729+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55730+static int gr_learn_attached;
55731+
55732+/* use a 512k buffer */
55733+#define LEARN_BUFFER_SIZE (512 * 1024)
55734+
55735+static DEFINE_SPINLOCK(gr_learn_lock);
55736+static DEFINE_MUTEX(gr_learn_user_mutex);
55737+
55738+/* we need to maintain two buffers, so that the kernel context of grlearn
55739+ uses a semaphore around the userspace copying, and the other kernel contexts
55740+ use a spinlock when copying into the buffer, since they cannot sleep
55741+*/
55742+static char *learn_buffer;
55743+static char *learn_buffer_user;
55744+static int learn_buffer_len;
55745+static int learn_buffer_user_len;
55746+
55747+static ssize_t
55748+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55749+{
55750+ DECLARE_WAITQUEUE(wait, current);
55751+ ssize_t retval = 0;
55752+
55753+ add_wait_queue(&learn_wait, &wait);
55754+ set_current_state(TASK_INTERRUPTIBLE);
55755+ do {
55756+ mutex_lock(&gr_learn_user_mutex);
55757+ spin_lock(&gr_learn_lock);
55758+ if (learn_buffer_len)
55759+ break;
55760+ spin_unlock(&gr_learn_lock);
55761+ mutex_unlock(&gr_learn_user_mutex);
55762+ if (file->f_flags & O_NONBLOCK) {
55763+ retval = -EAGAIN;
55764+ goto out;
55765+ }
55766+ if (signal_pending(current)) {
55767+ retval = -ERESTARTSYS;
55768+ goto out;
55769+ }
55770+
55771+ schedule();
55772+ } while (1);
55773+
55774+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55775+ learn_buffer_user_len = learn_buffer_len;
55776+ retval = learn_buffer_len;
55777+ learn_buffer_len = 0;
55778+
55779+ spin_unlock(&gr_learn_lock);
55780+
55781+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55782+ retval = -EFAULT;
55783+
55784+ mutex_unlock(&gr_learn_user_mutex);
55785+out:
55786+ set_current_state(TASK_RUNNING);
55787+ remove_wait_queue(&learn_wait, &wait);
55788+ return retval;
55789+}
55790+
55791+static unsigned int
55792+poll_learn(struct file * file, poll_table * wait)
55793+{
55794+ poll_wait(file, &learn_wait, wait);
55795+
55796+ if (learn_buffer_len)
55797+ return (POLLIN | POLLRDNORM);
55798+
55799+ return 0;
55800+}
55801+
55802+void
55803+gr_clear_learn_entries(void)
55804+{
55805+ char *tmp;
55806+
55807+ mutex_lock(&gr_learn_user_mutex);
55808+ spin_lock(&gr_learn_lock);
55809+ tmp = learn_buffer;
55810+ learn_buffer = NULL;
55811+ spin_unlock(&gr_learn_lock);
55812+ if (tmp)
55813+ vfree(tmp);
55814+ if (learn_buffer_user != NULL) {
55815+ vfree(learn_buffer_user);
55816+ learn_buffer_user = NULL;
55817+ }
55818+ learn_buffer_len = 0;
55819+ mutex_unlock(&gr_learn_user_mutex);
55820+
55821+ return;
55822+}
55823+
55824+void
55825+gr_add_learn_entry(const char *fmt, ...)
55826+{
55827+ va_list args;
55828+ unsigned int len;
55829+
55830+ if (!gr_learn_attached)
55831+ return;
55832+
55833+ spin_lock(&gr_learn_lock);
55834+
55835+ /* leave a gap at the end so we know when it's "full" but don't have to
55836+ compute the exact length of the string we're trying to append
55837+ */
55838+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55839+ spin_unlock(&gr_learn_lock);
55840+ wake_up_interruptible(&learn_wait);
55841+ return;
55842+ }
55843+ if (learn_buffer == NULL) {
55844+ spin_unlock(&gr_learn_lock);
55845+ return;
55846+ }
55847+
55848+ va_start(args, fmt);
55849+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55850+ va_end(args);
55851+
55852+ learn_buffer_len += len + 1;
55853+
55854+ spin_unlock(&gr_learn_lock);
55855+ wake_up_interruptible(&learn_wait);
55856+
55857+ return;
55858+}
55859+
55860+static int
55861+open_learn(struct inode *inode, struct file *file)
55862+{
55863+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55864+ return -EBUSY;
55865+ if (file->f_mode & FMODE_READ) {
55866+ int retval = 0;
55867+ mutex_lock(&gr_learn_user_mutex);
55868+ if (learn_buffer == NULL)
55869+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55870+ if (learn_buffer_user == NULL)
55871+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55872+ if (learn_buffer == NULL) {
55873+ retval = -ENOMEM;
55874+ goto out_error;
55875+ }
55876+ if (learn_buffer_user == NULL) {
55877+ retval = -ENOMEM;
55878+ goto out_error;
55879+ }
55880+ learn_buffer_len = 0;
55881+ learn_buffer_user_len = 0;
55882+ gr_learn_attached = 1;
55883+out_error:
55884+ mutex_unlock(&gr_learn_user_mutex);
55885+ return retval;
55886+ }
55887+ return 0;
55888+}
55889+
55890+static int
55891+close_learn(struct inode *inode, struct file *file)
55892+{
55893+ if (file->f_mode & FMODE_READ) {
55894+ char *tmp = NULL;
55895+ mutex_lock(&gr_learn_user_mutex);
55896+ spin_lock(&gr_learn_lock);
55897+ tmp = learn_buffer;
55898+ learn_buffer = NULL;
55899+ spin_unlock(&gr_learn_lock);
55900+ if (tmp)
55901+ vfree(tmp);
55902+ if (learn_buffer_user != NULL) {
55903+ vfree(learn_buffer_user);
55904+ learn_buffer_user = NULL;
55905+ }
55906+ learn_buffer_len = 0;
55907+ learn_buffer_user_len = 0;
55908+ gr_learn_attached = 0;
55909+ mutex_unlock(&gr_learn_user_mutex);
55910+ }
55911+
55912+ return 0;
55913+}
55914+
55915+const struct file_operations grsec_fops = {
55916+ .read = read_learn,
55917+ .write = write_grsec_handler,
55918+ .open = open_learn,
55919+ .release = close_learn,
55920+ .poll = poll_learn,
55921+};
55922diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55923new file mode 100644
55924index 0000000..39645c9
55925--- /dev/null
55926+++ b/grsecurity/gracl_res.c
55927@@ -0,0 +1,68 @@
55928+#include <linux/kernel.h>
55929+#include <linux/sched.h>
55930+#include <linux/gracl.h>
55931+#include <linux/grinternal.h>
55932+
55933+static const char *restab_log[] = {
55934+ [RLIMIT_CPU] = "RLIMIT_CPU",
55935+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55936+ [RLIMIT_DATA] = "RLIMIT_DATA",
55937+ [RLIMIT_STACK] = "RLIMIT_STACK",
55938+ [RLIMIT_CORE] = "RLIMIT_CORE",
55939+ [RLIMIT_RSS] = "RLIMIT_RSS",
55940+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
55941+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55942+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55943+ [RLIMIT_AS] = "RLIMIT_AS",
55944+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55945+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55946+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55947+ [RLIMIT_NICE] = "RLIMIT_NICE",
55948+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55949+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55950+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55951+};
55952+
55953+void
55954+gr_log_resource(const struct task_struct *task,
55955+ const int res, const unsigned long wanted, const int gt)
55956+{
55957+ const struct cred *cred;
55958+ unsigned long rlim;
55959+
55960+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55961+ return;
55962+
55963+ // not yet supported resource
55964+ if (unlikely(!restab_log[res]))
55965+ return;
55966+
55967+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55968+ rlim = task_rlimit_max(task, res);
55969+ else
55970+ rlim = task_rlimit(task, res);
55971+
55972+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55973+ return;
55974+
55975+ rcu_read_lock();
55976+ cred = __task_cred(task);
55977+
55978+ if (res == RLIMIT_NPROC &&
55979+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55980+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55981+ goto out_rcu_unlock;
55982+ else if (res == RLIMIT_MEMLOCK &&
55983+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55984+ goto out_rcu_unlock;
55985+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55986+ goto out_rcu_unlock;
55987+ rcu_read_unlock();
55988+
55989+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55990+
55991+ return;
55992+out_rcu_unlock:
55993+ rcu_read_unlock();
55994+ return;
55995+}
55996diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55997new file mode 100644
55998index 0000000..5556be3
55999--- /dev/null
56000+++ b/grsecurity/gracl_segv.c
56001@@ -0,0 +1,299 @@
56002+#include <linux/kernel.h>
56003+#include <linux/mm.h>
56004+#include <asm/uaccess.h>
56005+#include <asm/errno.h>
56006+#include <asm/mman.h>
56007+#include <net/sock.h>
56008+#include <linux/file.h>
56009+#include <linux/fs.h>
56010+#include <linux/net.h>
56011+#include <linux/in.h>
56012+#include <linux/slab.h>
56013+#include <linux/types.h>
56014+#include <linux/sched.h>
56015+#include <linux/timer.h>
56016+#include <linux/gracl.h>
56017+#include <linux/grsecurity.h>
56018+#include <linux/grinternal.h>
56019+
56020+static struct crash_uid *uid_set;
56021+static unsigned short uid_used;
56022+static DEFINE_SPINLOCK(gr_uid_lock);
56023+extern rwlock_t gr_inode_lock;
56024+extern struct acl_subject_label *
56025+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56026+ struct acl_role_label *role);
56027+
56028+#ifdef CONFIG_BTRFS_FS
56029+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56030+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56031+#endif
56032+
56033+static inline dev_t __get_dev(const struct dentry *dentry)
56034+{
56035+#ifdef CONFIG_BTRFS_FS
56036+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56037+ return get_btrfs_dev_from_inode(dentry->d_inode);
56038+ else
56039+#endif
56040+ return dentry->d_inode->i_sb->s_dev;
56041+}
56042+
56043+int
56044+gr_init_uidset(void)
56045+{
56046+ uid_set =
56047+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56048+ uid_used = 0;
56049+
56050+ return uid_set ? 1 : 0;
56051+}
56052+
56053+void
56054+gr_free_uidset(void)
56055+{
56056+ if (uid_set)
56057+ kfree(uid_set);
56058+
56059+ return;
56060+}
56061+
56062+int
56063+gr_find_uid(const uid_t uid)
56064+{
56065+ struct crash_uid *tmp = uid_set;
56066+ uid_t buid;
56067+ int low = 0, high = uid_used - 1, mid;
56068+
56069+ while (high >= low) {
56070+ mid = (low + high) >> 1;
56071+ buid = tmp[mid].uid;
56072+ if (buid == uid)
56073+ return mid;
56074+ if (buid > uid)
56075+ high = mid - 1;
56076+ if (buid < uid)
56077+ low = mid + 1;
56078+ }
56079+
56080+ return -1;
56081+}
56082+
56083+static __inline__ void
56084+gr_insertsort(void)
56085+{
56086+ unsigned short i, j;
56087+ struct crash_uid index;
56088+
56089+ for (i = 1; i < uid_used; i++) {
56090+ index = uid_set[i];
56091+ j = i;
56092+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56093+ uid_set[j] = uid_set[j - 1];
56094+ j--;
56095+ }
56096+ uid_set[j] = index;
56097+ }
56098+
56099+ return;
56100+}
56101+
56102+static __inline__ void
56103+gr_insert_uid(const uid_t uid, const unsigned long expires)
56104+{
56105+ int loc;
56106+
56107+ if (uid_used == GR_UIDTABLE_MAX)
56108+ return;
56109+
56110+ loc = gr_find_uid(uid);
56111+
56112+ if (loc >= 0) {
56113+ uid_set[loc].expires = expires;
56114+ return;
56115+ }
56116+
56117+ uid_set[uid_used].uid = uid;
56118+ uid_set[uid_used].expires = expires;
56119+ uid_used++;
56120+
56121+ gr_insertsort();
56122+
56123+ return;
56124+}
56125+
56126+void
56127+gr_remove_uid(const unsigned short loc)
56128+{
56129+ unsigned short i;
56130+
56131+ for (i = loc + 1; i < uid_used; i++)
56132+ uid_set[i - 1] = uid_set[i];
56133+
56134+ uid_used--;
56135+
56136+ return;
56137+}
56138+
56139+int
56140+gr_check_crash_uid(const uid_t uid)
56141+{
56142+ int loc;
56143+ int ret = 0;
56144+
56145+ if (unlikely(!gr_acl_is_enabled()))
56146+ return 0;
56147+
56148+ spin_lock(&gr_uid_lock);
56149+ loc = gr_find_uid(uid);
56150+
56151+ if (loc < 0)
56152+ goto out_unlock;
56153+
56154+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56155+ gr_remove_uid(loc);
56156+ else
56157+ ret = 1;
56158+
56159+out_unlock:
56160+ spin_unlock(&gr_uid_lock);
56161+ return ret;
56162+}
56163+
56164+static __inline__ int
56165+proc_is_setxid(const struct cred *cred)
56166+{
56167+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
56168+ cred->uid != cred->fsuid)
56169+ return 1;
56170+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56171+ cred->gid != cred->fsgid)
56172+ return 1;
56173+
56174+ return 0;
56175+}
56176+
56177+extern int gr_fake_force_sig(int sig, struct task_struct *t);
56178+
56179+void
56180+gr_handle_crash(struct task_struct *task, const int sig)
56181+{
56182+ struct acl_subject_label *curr;
56183+ struct task_struct *tsk, *tsk2;
56184+ const struct cred *cred;
56185+ const struct cred *cred2;
56186+
56187+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56188+ return;
56189+
56190+ if (unlikely(!gr_acl_is_enabled()))
56191+ return;
56192+
56193+ curr = task->acl;
56194+
56195+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56196+ return;
56197+
56198+ if (time_before_eq(curr->expires, get_seconds())) {
56199+ curr->expires = 0;
56200+ curr->crashes = 0;
56201+ }
56202+
56203+ curr->crashes++;
56204+
56205+ if (!curr->expires)
56206+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56207+
56208+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56209+ time_after(curr->expires, get_seconds())) {
56210+ rcu_read_lock();
56211+ cred = __task_cred(task);
56212+ if (cred->uid && proc_is_setxid(cred)) {
56213+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56214+ spin_lock(&gr_uid_lock);
56215+ gr_insert_uid(cred->uid, curr->expires);
56216+ spin_unlock(&gr_uid_lock);
56217+ curr->expires = 0;
56218+ curr->crashes = 0;
56219+ read_lock(&tasklist_lock);
56220+ do_each_thread(tsk2, tsk) {
56221+ cred2 = __task_cred(tsk);
56222+ if (tsk != task && cred2->uid == cred->uid)
56223+ gr_fake_force_sig(SIGKILL, tsk);
56224+ } while_each_thread(tsk2, tsk);
56225+ read_unlock(&tasklist_lock);
56226+ } else {
56227+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56228+ read_lock(&tasklist_lock);
56229+ read_lock(&grsec_exec_file_lock);
56230+ do_each_thread(tsk2, tsk) {
56231+ if (likely(tsk != task)) {
56232+ // if this thread has the same subject as the one that triggered
56233+ // RES_CRASH and it's the same binary, kill it
56234+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56235+ gr_fake_force_sig(SIGKILL, tsk);
56236+ }
56237+ } while_each_thread(tsk2, tsk);
56238+ read_unlock(&grsec_exec_file_lock);
56239+ read_unlock(&tasklist_lock);
56240+ }
56241+ rcu_read_unlock();
56242+ }
56243+
56244+ return;
56245+}
56246+
56247+int
56248+gr_check_crash_exec(const struct file *filp)
56249+{
56250+ struct acl_subject_label *curr;
56251+
56252+ if (unlikely(!gr_acl_is_enabled()))
56253+ return 0;
56254+
56255+ read_lock(&gr_inode_lock);
56256+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56257+ __get_dev(filp->f_path.dentry),
56258+ current->role);
56259+ read_unlock(&gr_inode_lock);
56260+
56261+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56262+ (!curr->crashes && !curr->expires))
56263+ return 0;
56264+
56265+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56266+ time_after(curr->expires, get_seconds()))
56267+ return 1;
56268+ else if (time_before_eq(curr->expires, get_seconds())) {
56269+ curr->crashes = 0;
56270+ curr->expires = 0;
56271+ }
56272+
56273+ return 0;
56274+}
56275+
56276+void
56277+gr_handle_alertkill(struct task_struct *task)
56278+{
56279+ struct acl_subject_label *curracl;
56280+ __u32 curr_ip;
56281+ struct task_struct *p, *p2;
56282+
56283+ if (unlikely(!gr_acl_is_enabled()))
56284+ return;
56285+
56286+ curracl = task->acl;
56287+ curr_ip = task->signal->curr_ip;
56288+
56289+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56290+ read_lock(&tasklist_lock);
56291+ do_each_thread(p2, p) {
56292+ if (p->signal->curr_ip == curr_ip)
56293+ gr_fake_force_sig(SIGKILL, p);
56294+ } while_each_thread(p2, p);
56295+ read_unlock(&tasklist_lock);
56296+ } else if (curracl->mode & GR_KILLPROC)
56297+ gr_fake_force_sig(SIGKILL, task);
56298+
56299+ return;
56300+}
56301diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56302new file mode 100644
56303index 0000000..9d83a69
56304--- /dev/null
56305+++ b/grsecurity/gracl_shm.c
56306@@ -0,0 +1,40 @@
56307+#include <linux/kernel.h>
56308+#include <linux/mm.h>
56309+#include <linux/sched.h>
56310+#include <linux/file.h>
56311+#include <linux/ipc.h>
56312+#include <linux/gracl.h>
56313+#include <linux/grsecurity.h>
56314+#include <linux/grinternal.h>
56315+
56316+int
56317+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56318+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56319+{
56320+ struct task_struct *task;
56321+
56322+ if (!gr_acl_is_enabled())
56323+ return 1;
56324+
56325+ rcu_read_lock();
56326+ read_lock(&tasklist_lock);
56327+
56328+ task = find_task_by_vpid(shm_cprid);
56329+
56330+ if (unlikely(!task))
56331+ task = find_task_by_vpid(shm_lapid);
56332+
56333+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56334+ (task->pid == shm_lapid)) &&
56335+ (task->acl->mode & GR_PROTSHM) &&
56336+ (task->acl != current->acl))) {
56337+ read_unlock(&tasklist_lock);
56338+ rcu_read_unlock();
56339+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56340+ return 0;
56341+ }
56342+ read_unlock(&tasklist_lock);
56343+ rcu_read_unlock();
56344+
56345+ return 1;
56346+}
56347diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56348new file mode 100644
56349index 0000000..bc0be01
56350--- /dev/null
56351+++ b/grsecurity/grsec_chdir.c
56352@@ -0,0 +1,19 @@
56353+#include <linux/kernel.h>
56354+#include <linux/sched.h>
56355+#include <linux/fs.h>
56356+#include <linux/file.h>
56357+#include <linux/grsecurity.h>
56358+#include <linux/grinternal.h>
56359+
56360+void
56361+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56362+{
56363+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56364+ if ((grsec_enable_chdir && grsec_enable_group &&
56365+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56366+ !grsec_enable_group)) {
56367+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56368+ }
56369+#endif
56370+ return;
56371+}
56372diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56373new file mode 100644
56374index 0000000..9807ee2
56375--- /dev/null
56376+++ b/grsecurity/grsec_chroot.c
56377@@ -0,0 +1,368 @@
56378+#include <linux/kernel.h>
56379+#include <linux/module.h>
56380+#include <linux/sched.h>
56381+#include <linux/file.h>
56382+#include <linux/fs.h>
56383+#include <linux/mount.h>
56384+#include <linux/types.h>
56385+#include "../fs/mount.h"
56386+#include <linux/grsecurity.h>
56387+#include <linux/grinternal.h>
56388+
56389+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56390+{
56391+#ifdef CONFIG_GRKERNSEC
56392+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56393+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56394+ task->gr_is_chrooted = 1;
56395+ else
56396+ task->gr_is_chrooted = 0;
56397+
56398+ task->gr_chroot_dentry = path->dentry;
56399+#endif
56400+ return;
56401+}
56402+
56403+void gr_clear_chroot_entries(struct task_struct *task)
56404+{
56405+#ifdef CONFIG_GRKERNSEC
56406+ task->gr_is_chrooted = 0;
56407+ task->gr_chroot_dentry = NULL;
56408+#endif
56409+ return;
56410+}
56411+
56412+int
56413+gr_handle_chroot_unix(const pid_t pid)
56414+{
56415+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56416+ struct task_struct *p;
56417+
56418+ if (unlikely(!grsec_enable_chroot_unix))
56419+ return 1;
56420+
56421+ if (likely(!proc_is_chrooted(current)))
56422+ return 1;
56423+
56424+ rcu_read_lock();
56425+ read_lock(&tasklist_lock);
56426+ p = find_task_by_vpid_unrestricted(pid);
56427+ if (unlikely(p && !have_same_root(current, p))) {
56428+ read_unlock(&tasklist_lock);
56429+ rcu_read_unlock();
56430+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56431+ return 0;
56432+ }
56433+ read_unlock(&tasklist_lock);
56434+ rcu_read_unlock();
56435+#endif
56436+ return 1;
56437+}
56438+
56439+int
56440+gr_handle_chroot_nice(void)
56441+{
56442+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56443+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56444+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56445+ return -EPERM;
56446+ }
56447+#endif
56448+ return 0;
56449+}
56450+
56451+int
56452+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56453+{
56454+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56455+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56456+ && proc_is_chrooted(current)) {
56457+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56458+ return -EACCES;
56459+ }
56460+#endif
56461+ return 0;
56462+}
56463+
56464+int
56465+gr_handle_chroot_rawio(const struct inode *inode)
56466+{
56467+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56468+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56469+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56470+ return 1;
56471+#endif
56472+ return 0;
56473+}
56474+
56475+int
56476+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56477+{
56478+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56479+ struct task_struct *p;
56480+ int ret = 0;
56481+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56482+ return ret;
56483+
56484+ read_lock(&tasklist_lock);
56485+ do_each_pid_task(pid, type, p) {
56486+ if (!have_same_root(current, p)) {
56487+ ret = 1;
56488+ goto out;
56489+ }
56490+ } while_each_pid_task(pid, type, p);
56491+out:
56492+ read_unlock(&tasklist_lock);
56493+ return ret;
56494+#endif
56495+ return 0;
56496+}
56497+
56498+int
56499+gr_pid_is_chrooted(struct task_struct *p)
56500+{
56501+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56502+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56503+ return 0;
56504+
56505+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56506+ !have_same_root(current, p)) {
56507+ return 1;
56508+ }
56509+#endif
56510+ return 0;
56511+}
56512+
56513+EXPORT_SYMBOL(gr_pid_is_chrooted);
56514+
56515+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56516+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56517+{
56518+ struct path path, currentroot;
56519+ int ret = 0;
56520+
56521+ path.dentry = (struct dentry *)u_dentry;
56522+ path.mnt = (struct vfsmount *)u_mnt;
56523+ get_fs_root(current->fs, &currentroot);
56524+ if (path_is_under(&path, &currentroot))
56525+ ret = 1;
56526+ path_put(&currentroot);
56527+
56528+ return ret;
56529+}
56530+#endif
56531+
56532+int
56533+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56534+{
56535+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56536+ if (!grsec_enable_chroot_fchdir)
56537+ return 1;
56538+
56539+ if (!proc_is_chrooted(current))
56540+ return 1;
56541+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56542+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56543+ return 0;
56544+ }
56545+#endif
56546+ return 1;
56547+}
56548+
56549+int
56550+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56551+ const time_t shm_createtime)
56552+{
56553+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56554+ struct task_struct *p;
56555+ time_t starttime;
56556+
56557+ if (unlikely(!grsec_enable_chroot_shmat))
56558+ return 1;
56559+
56560+ if (likely(!proc_is_chrooted(current)))
56561+ return 1;
56562+
56563+ rcu_read_lock();
56564+ read_lock(&tasklist_lock);
56565+
56566+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56567+ starttime = p->start_time.tv_sec;
56568+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56569+ if (have_same_root(current, p)) {
56570+ goto allow;
56571+ } else {
56572+ read_unlock(&tasklist_lock);
56573+ rcu_read_unlock();
56574+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56575+ return 0;
56576+ }
56577+ }
56578+ /* creator exited, pid reuse, fall through to next check */
56579+ }
56580+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56581+ if (unlikely(!have_same_root(current, p))) {
56582+ read_unlock(&tasklist_lock);
56583+ rcu_read_unlock();
56584+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56585+ return 0;
56586+ }
56587+ }
56588+
56589+allow:
56590+ read_unlock(&tasklist_lock);
56591+ rcu_read_unlock();
56592+#endif
56593+ return 1;
56594+}
56595+
56596+void
56597+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56598+{
56599+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56600+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56601+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56602+#endif
56603+ return;
56604+}
56605+
56606+int
56607+gr_handle_chroot_mknod(const struct dentry *dentry,
56608+ const struct vfsmount *mnt, const int mode)
56609+{
56610+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56611+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56612+ proc_is_chrooted(current)) {
56613+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56614+ return -EPERM;
56615+ }
56616+#endif
56617+ return 0;
56618+}
56619+
56620+int
56621+gr_handle_chroot_mount(const struct dentry *dentry,
56622+ const struct vfsmount *mnt, const char *dev_name)
56623+{
56624+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56625+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56626+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56627+ return -EPERM;
56628+ }
56629+#endif
56630+ return 0;
56631+}
56632+
56633+int
56634+gr_handle_chroot_pivot(void)
56635+{
56636+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56637+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56638+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56639+ return -EPERM;
56640+ }
56641+#endif
56642+ return 0;
56643+}
56644+
56645+int
56646+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56647+{
56648+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56649+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56650+ !gr_is_outside_chroot(dentry, mnt)) {
56651+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56652+ return -EPERM;
56653+ }
56654+#endif
56655+ return 0;
56656+}
56657+
56658+extern const char *captab_log[];
56659+extern int captab_log_entries;
56660+
56661+int
56662+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56663+{
56664+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56665+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56666+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56667+ if (cap_raised(chroot_caps, cap)) {
56668+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56669+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56670+ }
56671+ return 0;
56672+ }
56673+ }
56674+#endif
56675+ return 1;
56676+}
56677+
56678+int
56679+gr_chroot_is_capable(const int cap)
56680+{
56681+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56682+ return gr_task_chroot_is_capable(current, current_cred(), cap);
56683+#endif
56684+ return 1;
56685+}
56686+
56687+int
56688+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56689+{
56690+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56691+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56692+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56693+ if (cap_raised(chroot_caps, cap)) {
56694+ return 0;
56695+ }
56696+ }
56697+#endif
56698+ return 1;
56699+}
56700+
56701+int
56702+gr_chroot_is_capable_nolog(const int cap)
56703+{
56704+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56705+ return gr_task_chroot_is_capable_nolog(current, cap);
56706+#endif
56707+ return 1;
56708+}
56709+
56710+int
56711+gr_handle_chroot_sysctl(const int op)
56712+{
56713+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56714+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56715+ proc_is_chrooted(current))
56716+ return -EACCES;
56717+#endif
56718+ return 0;
56719+}
56720+
56721+void
56722+gr_handle_chroot_chdir(struct path *path)
56723+{
56724+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56725+ if (grsec_enable_chroot_chdir)
56726+ set_fs_pwd(current->fs, path);
56727+#endif
56728+ return;
56729+}
56730+
56731+int
56732+gr_handle_chroot_chmod(const struct dentry *dentry,
56733+ const struct vfsmount *mnt, const int mode)
56734+{
56735+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56736+ /* allow chmod +s on directories, but not files */
56737+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56738+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56739+ proc_is_chrooted(current)) {
56740+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56741+ return -EPERM;
56742+ }
56743+#endif
56744+ return 0;
56745+}
56746diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56747new file mode 100644
56748index 0000000..213ad8b
56749--- /dev/null
56750+++ b/grsecurity/grsec_disabled.c
56751@@ -0,0 +1,437 @@
56752+#include <linux/kernel.h>
56753+#include <linux/module.h>
56754+#include <linux/sched.h>
56755+#include <linux/file.h>
56756+#include <linux/fs.h>
56757+#include <linux/kdev_t.h>
56758+#include <linux/net.h>
56759+#include <linux/in.h>
56760+#include <linux/ip.h>
56761+#include <linux/skbuff.h>
56762+#include <linux/sysctl.h>
56763+
56764+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56765+void
56766+pax_set_initial_flags(struct linux_binprm *bprm)
56767+{
56768+ return;
56769+}
56770+#endif
56771+
56772+#ifdef CONFIG_SYSCTL
56773+__u32
56774+gr_handle_sysctl(const struct ctl_table * table, const int op)
56775+{
56776+ return 0;
56777+}
56778+#endif
56779+
56780+#ifdef CONFIG_TASKSTATS
56781+int gr_is_taskstats_denied(int pid)
56782+{
56783+ return 0;
56784+}
56785+#endif
56786+
56787+int
56788+gr_acl_is_enabled(void)
56789+{
56790+ return 0;
56791+}
56792+
56793+void
56794+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56795+{
56796+ return;
56797+}
56798+
56799+int
56800+gr_handle_rawio(const struct inode *inode)
56801+{
56802+ return 0;
56803+}
56804+
56805+void
56806+gr_acl_handle_psacct(struct task_struct *task, const long code)
56807+{
56808+ return;
56809+}
56810+
56811+int
56812+gr_handle_ptrace(struct task_struct *task, const long request)
56813+{
56814+ return 0;
56815+}
56816+
56817+int
56818+gr_handle_proc_ptrace(struct task_struct *task)
56819+{
56820+ return 0;
56821+}
56822+
56823+void
56824+gr_learn_resource(const struct task_struct *task,
56825+ const int res, const unsigned long wanted, const int gt)
56826+{
56827+ return;
56828+}
56829+
56830+int
56831+gr_set_acls(const int type)
56832+{
56833+ return 0;
56834+}
56835+
56836+int
56837+gr_check_hidden_task(const struct task_struct *tsk)
56838+{
56839+ return 0;
56840+}
56841+
56842+int
56843+gr_check_protected_task(const struct task_struct *task)
56844+{
56845+ return 0;
56846+}
56847+
56848+int
56849+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56850+{
56851+ return 0;
56852+}
56853+
56854+void
56855+gr_copy_label(struct task_struct *tsk)
56856+{
56857+ return;
56858+}
56859+
56860+void
56861+gr_set_pax_flags(struct task_struct *task)
56862+{
56863+ return;
56864+}
56865+
56866+int
56867+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56868+ const int unsafe_share)
56869+{
56870+ return 0;
56871+}
56872+
56873+void
56874+gr_handle_delete(const ino_t ino, const dev_t dev)
56875+{
56876+ return;
56877+}
56878+
56879+void
56880+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56881+{
56882+ return;
56883+}
56884+
56885+void
56886+gr_handle_crash(struct task_struct *task, const int sig)
56887+{
56888+ return;
56889+}
56890+
56891+int
56892+gr_check_crash_exec(const struct file *filp)
56893+{
56894+ return 0;
56895+}
56896+
56897+int
56898+gr_check_crash_uid(const uid_t uid)
56899+{
56900+ return 0;
56901+}
56902+
56903+void
56904+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56905+ struct dentry *old_dentry,
56906+ struct dentry *new_dentry,
56907+ struct vfsmount *mnt, const __u8 replace)
56908+{
56909+ return;
56910+}
56911+
56912+int
56913+gr_search_socket(const int family, const int type, const int protocol)
56914+{
56915+ return 1;
56916+}
56917+
56918+int
56919+gr_search_connectbind(const int mode, const struct socket *sock,
56920+ const struct sockaddr_in *addr)
56921+{
56922+ return 0;
56923+}
56924+
56925+void
56926+gr_handle_alertkill(struct task_struct *task)
56927+{
56928+ return;
56929+}
56930+
56931+__u32
56932+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56933+{
56934+ return 1;
56935+}
56936+
56937+__u32
56938+gr_acl_handle_hidden_file(const struct dentry * dentry,
56939+ const struct vfsmount * mnt)
56940+{
56941+ return 1;
56942+}
56943+
56944+__u32
56945+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56946+ int acc_mode)
56947+{
56948+ return 1;
56949+}
56950+
56951+__u32
56952+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56953+{
56954+ return 1;
56955+}
56956+
56957+__u32
56958+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56959+{
56960+ return 1;
56961+}
56962+
56963+int
56964+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56965+ unsigned int *vm_flags)
56966+{
56967+ return 1;
56968+}
56969+
56970+__u32
56971+gr_acl_handle_truncate(const struct dentry * dentry,
56972+ const struct vfsmount * mnt)
56973+{
56974+ return 1;
56975+}
56976+
56977+__u32
56978+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56979+{
56980+ return 1;
56981+}
56982+
56983+__u32
56984+gr_acl_handle_access(const struct dentry * dentry,
56985+ const struct vfsmount * mnt, const int fmode)
56986+{
56987+ return 1;
56988+}
56989+
56990+__u32
56991+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56992+ umode_t *mode)
56993+{
56994+ return 1;
56995+}
56996+
56997+__u32
56998+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56999+{
57000+ return 1;
57001+}
57002+
57003+__u32
57004+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57005+{
57006+ return 1;
57007+}
57008+
57009+void
57010+grsecurity_init(void)
57011+{
57012+ return;
57013+}
57014+
57015+umode_t gr_acl_umask(void)
57016+{
57017+ return 0;
57018+}
57019+
57020+__u32
57021+gr_acl_handle_mknod(const struct dentry * new_dentry,
57022+ const struct dentry * parent_dentry,
57023+ const struct vfsmount * parent_mnt,
57024+ const int mode)
57025+{
57026+ return 1;
57027+}
57028+
57029+__u32
57030+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57031+ const struct dentry * parent_dentry,
57032+ const struct vfsmount * parent_mnt)
57033+{
57034+ return 1;
57035+}
57036+
57037+__u32
57038+gr_acl_handle_symlink(const struct dentry * new_dentry,
57039+ const struct dentry * parent_dentry,
57040+ const struct vfsmount * parent_mnt, const char *from)
57041+{
57042+ return 1;
57043+}
57044+
57045+__u32
57046+gr_acl_handle_link(const struct dentry * new_dentry,
57047+ const struct dentry * parent_dentry,
57048+ const struct vfsmount * parent_mnt,
57049+ const struct dentry * old_dentry,
57050+ const struct vfsmount * old_mnt, const char *to)
57051+{
57052+ return 1;
57053+}
57054+
57055+int
57056+gr_acl_handle_rename(const struct dentry *new_dentry,
57057+ const struct dentry *parent_dentry,
57058+ const struct vfsmount *parent_mnt,
57059+ const struct dentry *old_dentry,
57060+ const struct inode *old_parent_inode,
57061+ const struct vfsmount *old_mnt, const char *newname)
57062+{
57063+ return 0;
57064+}
57065+
57066+int
57067+gr_acl_handle_filldir(const struct file *file, const char *name,
57068+ const int namelen, const ino_t ino)
57069+{
57070+ return 1;
57071+}
57072+
57073+int
57074+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57075+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57076+{
57077+ return 1;
57078+}
57079+
57080+int
57081+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57082+{
57083+ return 0;
57084+}
57085+
57086+int
57087+gr_search_accept(const struct socket *sock)
57088+{
57089+ return 0;
57090+}
57091+
57092+int
57093+gr_search_listen(const struct socket *sock)
57094+{
57095+ return 0;
57096+}
57097+
57098+int
57099+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57100+{
57101+ return 0;
57102+}
57103+
57104+__u32
57105+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57106+{
57107+ return 1;
57108+}
57109+
57110+__u32
57111+gr_acl_handle_creat(const struct dentry * dentry,
57112+ const struct dentry * p_dentry,
57113+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57114+ const int imode)
57115+{
57116+ return 1;
57117+}
57118+
57119+void
57120+gr_acl_handle_exit(void)
57121+{
57122+ return;
57123+}
57124+
57125+int
57126+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57127+{
57128+ return 1;
57129+}
57130+
57131+void
57132+gr_set_role_label(const uid_t uid, const gid_t gid)
57133+{
57134+ return;
57135+}
57136+
57137+int
57138+gr_acl_handle_procpidmem(const struct task_struct *task)
57139+{
57140+ return 0;
57141+}
57142+
57143+int
57144+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57145+{
57146+ return 0;
57147+}
57148+
57149+int
57150+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57151+{
57152+ return 0;
57153+}
57154+
57155+void
57156+gr_set_kernel_label(struct task_struct *task)
57157+{
57158+ return;
57159+}
57160+
57161+int
57162+gr_check_user_change(int real, int effective, int fs)
57163+{
57164+ return 0;
57165+}
57166+
57167+int
57168+gr_check_group_change(int real, int effective, int fs)
57169+{
57170+ return 0;
57171+}
57172+
57173+int gr_acl_enable_at_secure(void)
57174+{
57175+ return 0;
57176+}
57177+
57178+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57179+{
57180+ return dentry->d_inode->i_sb->s_dev;
57181+}
57182+
57183+EXPORT_SYMBOL(gr_learn_resource);
57184+EXPORT_SYMBOL(gr_set_kernel_label);
57185+#ifdef CONFIG_SECURITY
57186+EXPORT_SYMBOL(gr_check_user_change);
57187+EXPORT_SYMBOL(gr_check_group_change);
57188+#endif
57189diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57190new file mode 100644
57191index 0000000..abfa971
57192--- /dev/null
57193+++ b/grsecurity/grsec_exec.c
57194@@ -0,0 +1,174 @@
57195+#include <linux/kernel.h>
57196+#include <linux/sched.h>
57197+#include <linux/file.h>
57198+#include <linux/binfmts.h>
57199+#include <linux/fs.h>
57200+#include <linux/types.h>
57201+#include <linux/grdefs.h>
57202+#include <linux/grsecurity.h>
57203+#include <linux/grinternal.h>
57204+#include <linux/capability.h>
57205+#include <linux/module.h>
57206+
57207+#include <asm/uaccess.h>
57208+
57209+#ifdef CONFIG_GRKERNSEC_EXECLOG
57210+static char gr_exec_arg_buf[132];
57211+static DEFINE_MUTEX(gr_exec_arg_mutex);
57212+#endif
57213+
57214+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57215+
57216+void
57217+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57218+{
57219+#ifdef CONFIG_GRKERNSEC_EXECLOG
57220+ char *grarg = gr_exec_arg_buf;
57221+ unsigned int i, x, execlen = 0;
57222+ char c;
57223+
57224+ if (!((grsec_enable_execlog && grsec_enable_group &&
57225+ in_group_p(grsec_audit_gid))
57226+ || (grsec_enable_execlog && !grsec_enable_group)))
57227+ return;
57228+
57229+ mutex_lock(&gr_exec_arg_mutex);
57230+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57231+
57232+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57233+ const char __user *p;
57234+ unsigned int len;
57235+
57236+ p = get_user_arg_ptr(argv, i);
57237+ if (IS_ERR(p))
57238+ goto log;
57239+
57240+ len = strnlen_user(p, 128 - execlen);
57241+ if (len > 128 - execlen)
57242+ len = 128 - execlen;
57243+ else if (len > 0)
57244+ len--;
57245+ if (copy_from_user(grarg + execlen, p, len))
57246+ goto log;
57247+
57248+ /* rewrite unprintable characters */
57249+ for (x = 0; x < len; x++) {
57250+ c = *(grarg + execlen + x);
57251+ if (c < 32 || c > 126)
57252+ *(grarg + execlen + x) = ' ';
57253+ }
57254+
57255+ execlen += len;
57256+ *(grarg + execlen) = ' ';
57257+ *(grarg + execlen + 1) = '\0';
57258+ execlen++;
57259+ }
57260+
57261+ log:
57262+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57263+ bprm->file->f_path.mnt, grarg);
57264+ mutex_unlock(&gr_exec_arg_mutex);
57265+#endif
57266+ return;
57267+}
57268+
57269+#ifdef CONFIG_GRKERNSEC
57270+extern int gr_acl_is_capable(const int cap);
57271+extern int gr_acl_is_capable_nolog(const int cap);
57272+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57273+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57274+extern int gr_chroot_is_capable(const int cap);
57275+extern int gr_chroot_is_capable_nolog(const int cap);
57276+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57277+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57278+#endif
57279+
57280+const char *captab_log[] = {
57281+ "CAP_CHOWN",
57282+ "CAP_DAC_OVERRIDE",
57283+ "CAP_DAC_READ_SEARCH",
57284+ "CAP_FOWNER",
57285+ "CAP_FSETID",
57286+ "CAP_KILL",
57287+ "CAP_SETGID",
57288+ "CAP_SETUID",
57289+ "CAP_SETPCAP",
57290+ "CAP_LINUX_IMMUTABLE",
57291+ "CAP_NET_BIND_SERVICE",
57292+ "CAP_NET_BROADCAST",
57293+ "CAP_NET_ADMIN",
57294+ "CAP_NET_RAW",
57295+ "CAP_IPC_LOCK",
57296+ "CAP_IPC_OWNER",
57297+ "CAP_SYS_MODULE",
57298+ "CAP_SYS_RAWIO",
57299+ "CAP_SYS_CHROOT",
57300+ "CAP_SYS_PTRACE",
57301+ "CAP_SYS_PACCT",
57302+ "CAP_SYS_ADMIN",
57303+ "CAP_SYS_BOOT",
57304+ "CAP_SYS_NICE",
57305+ "CAP_SYS_RESOURCE",
57306+ "CAP_SYS_TIME",
57307+ "CAP_SYS_TTY_CONFIG",
57308+ "CAP_MKNOD",
57309+ "CAP_LEASE",
57310+ "CAP_AUDIT_WRITE",
57311+ "CAP_AUDIT_CONTROL",
57312+ "CAP_SETFCAP",
57313+ "CAP_MAC_OVERRIDE",
57314+ "CAP_MAC_ADMIN",
57315+ "CAP_SYSLOG",
57316+ "CAP_WAKE_ALARM"
57317+};
57318+
57319+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57320+
57321+int gr_is_capable(const int cap)
57322+{
57323+#ifdef CONFIG_GRKERNSEC
57324+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57325+ return 1;
57326+ return 0;
57327+#else
57328+ return 1;
57329+#endif
57330+}
57331+
57332+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57333+{
57334+#ifdef CONFIG_GRKERNSEC
57335+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57336+ return 1;
57337+ return 0;
57338+#else
57339+ return 1;
57340+#endif
57341+}
57342+
57343+int gr_is_capable_nolog(const int cap)
57344+{
57345+#ifdef CONFIG_GRKERNSEC
57346+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57347+ return 1;
57348+ return 0;
57349+#else
57350+ return 1;
57351+#endif
57352+}
57353+
57354+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57355+{
57356+#ifdef CONFIG_GRKERNSEC
57357+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57358+ return 1;
57359+ return 0;
57360+#else
57361+ return 1;
57362+#endif
57363+}
57364+
57365+EXPORT_SYMBOL(gr_is_capable);
57366+EXPORT_SYMBOL(gr_is_capable_nolog);
57367+EXPORT_SYMBOL(gr_task_is_capable);
57368+EXPORT_SYMBOL(gr_task_is_capable_nolog);
57369diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57370new file mode 100644
57371index 0000000..d3ee748
57372--- /dev/null
57373+++ b/grsecurity/grsec_fifo.c
57374@@ -0,0 +1,24 @@
57375+#include <linux/kernel.h>
57376+#include <linux/sched.h>
57377+#include <linux/fs.h>
57378+#include <linux/file.h>
57379+#include <linux/grinternal.h>
57380+
57381+int
57382+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57383+ const struct dentry *dir, const int flag, const int acc_mode)
57384+{
57385+#ifdef CONFIG_GRKERNSEC_FIFO
57386+ const struct cred *cred = current_cred();
57387+
57388+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57389+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57390+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57391+ (cred->fsuid != dentry->d_inode->i_uid)) {
57392+ if (!inode_permission(dentry->d_inode, acc_mode))
57393+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57394+ return -EACCES;
57395+ }
57396+#endif
57397+ return 0;
57398+}
57399diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57400new file mode 100644
57401index 0000000..8ca18bf
57402--- /dev/null
57403+++ b/grsecurity/grsec_fork.c
57404@@ -0,0 +1,23 @@
57405+#include <linux/kernel.h>
57406+#include <linux/sched.h>
57407+#include <linux/grsecurity.h>
57408+#include <linux/grinternal.h>
57409+#include <linux/errno.h>
57410+
57411+void
57412+gr_log_forkfail(const int retval)
57413+{
57414+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57415+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57416+ switch (retval) {
57417+ case -EAGAIN:
57418+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57419+ break;
57420+ case -ENOMEM:
57421+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57422+ break;
57423+ }
57424+ }
57425+#endif
57426+ return;
57427+}
57428diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57429new file mode 100644
57430index 0000000..05a6015
57431--- /dev/null
57432+++ b/grsecurity/grsec_init.c
57433@@ -0,0 +1,283 @@
57434+#include <linux/kernel.h>
57435+#include <linux/sched.h>
57436+#include <linux/mm.h>
57437+#include <linux/gracl.h>
57438+#include <linux/slab.h>
57439+#include <linux/vmalloc.h>
57440+#include <linux/percpu.h>
57441+#include <linux/module.h>
57442+
57443+int grsec_enable_ptrace_readexec;
57444+int grsec_enable_setxid;
57445+int grsec_enable_symlinkown;
57446+int grsec_symlinkown_gid;
57447+int grsec_enable_brute;
57448+int grsec_enable_link;
57449+int grsec_enable_dmesg;
57450+int grsec_enable_harden_ptrace;
57451+int grsec_enable_fifo;
57452+int grsec_enable_execlog;
57453+int grsec_enable_signal;
57454+int grsec_enable_forkfail;
57455+int grsec_enable_audit_ptrace;
57456+int grsec_enable_time;
57457+int grsec_enable_audit_textrel;
57458+int grsec_enable_group;
57459+int grsec_audit_gid;
57460+int grsec_enable_chdir;
57461+int grsec_enable_mount;
57462+int grsec_enable_rofs;
57463+int grsec_enable_chroot_findtask;
57464+int grsec_enable_chroot_mount;
57465+int grsec_enable_chroot_shmat;
57466+int grsec_enable_chroot_fchdir;
57467+int grsec_enable_chroot_double;
57468+int grsec_enable_chroot_pivot;
57469+int grsec_enable_chroot_chdir;
57470+int grsec_enable_chroot_chmod;
57471+int grsec_enable_chroot_mknod;
57472+int grsec_enable_chroot_nice;
57473+int grsec_enable_chroot_execlog;
57474+int grsec_enable_chroot_caps;
57475+int grsec_enable_chroot_sysctl;
57476+int grsec_enable_chroot_unix;
57477+int grsec_enable_tpe;
57478+int grsec_tpe_gid;
57479+int grsec_enable_blackhole;
57480+#ifdef CONFIG_IPV6_MODULE
57481+EXPORT_SYMBOL(grsec_enable_blackhole);
57482+#endif
57483+int grsec_lastack_retries;
57484+int grsec_enable_tpe_all;
57485+int grsec_enable_tpe_invert;
57486+int grsec_enable_socket_all;
57487+int grsec_socket_all_gid;
57488+int grsec_enable_socket_client;
57489+int grsec_socket_client_gid;
57490+int grsec_enable_socket_server;
57491+int grsec_socket_server_gid;
57492+int grsec_resource_logging;
57493+int grsec_disable_privio;
57494+int grsec_enable_log_rwxmaps;
57495+int grsec_lock;
57496+
57497+DEFINE_SPINLOCK(grsec_alert_lock);
57498+unsigned long grsec_alert_wtime = 0;
57499+unsigned long grsec_alert_fyet = 0;
57500+
57501+DEFINE_SPINLOCK(grsec_audit_lock);
57502+
57503+DEFINE_RWLOCK(grsec_exec_file_lock);
57504+
57505+char *gr_shared_page[4];
57506+
57507+char *gr_alert_log_fmt;
57508+char *gr_audit_log_fmt;
57509+char *gr_alert_log_buf;
57510+char *gr_audit_log_buf;
57511+
57512+extern struct gr_arg *gr_usermode;
57513+extern unsigned char *gr_system_salt;
57514+extern unsigned char *gr_system_sum;
57515+
57516+void __init
57517+grsecurity_init(void)
57518+{
57519+ int j;
57520+ /* create the per-cpu shared pages */
57521+
57522+#ifdef CONFIG_X86
57523+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57524+#endif
57525+
57526+ for (j = 0; j < 4; j++) {
57527+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57528+ if (gr_shared_page[j] == NULL) {
57529+ panic("Unable to allocate grsecurity shared page");
57530+ return;
57531+ }
57532+ }
57533+
57534+ /* allocate log buffers */
57535+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57536+ if (!gr_alert_log_fmt) {
57537+ panic("Unable to allocate grsecurity alert log format buffer");
57538+ return;
57539+ }
57540+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57541+ if (!gr_audit_log_fmt) {
57542+ panic("Unable to allocate grsecurity audit log format buffer");
57543+ return;
57544+ }
57545+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57546+ if (!gr_alert_log_buf) {
57547+ panic("Unable to allocate grsecurity alert log buffer");
57548+ return;
57549+ }
57550+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57551+ if (!gr_audit_log_buf) {
57552+ panic("Unable to allocate grsecurity audit log buffer");
57553+ return;
57554+ }
57555+
57556+ /* allocate memory for authentication structure */
57557+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57558+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57559+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57560+
57561+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57562+ panic("Unable to allocate grsecurity authentication structure");
57563+ return;
57564+ }
57565+
57566+
57567+#ifdef CONFIG_GRKERNSEC_IO
57568+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57569+ grsec_disable_privio = 1;
57570+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57571+ grsec_disable_privio = 1;
57572+#else
57573+ grsec_disable_privio = 0;
57574+#endif
57575+#endif
57576+
57577+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57578+ /* for backward compatibility, tpe_invert always defaults to on if
57579+ enabled in the kernel
57580+ */
57581+ grsec_enable_tpe_invert = 1;
57582+#endif
57583+
57584+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57585+#ifndef CONFIG_GRKERNSEC_SYSCTL
57586+ grsec_lock = 1;
57587+#endif
57588+
57589+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57590+ grsec_enable_audit_textrel = 1;
57591+#endif
57592+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57593+ grsec_enable_log_rwxmaps = 1;
57594+#endif
57595+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57596+ grsec_enable_group = 1;
57597+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57598+#endif
57599+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57600+ grsec_enable_ptrace_readexec = 1;
57601+#endif
57602+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57603+ grsec_enable_chdir = 1;
57604+#endif
57605+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57606+ grsec_enable_harden_ptrace = 1;
57607+#endif
57608+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57609+ grsec_enable_mount = 1;
57610+#endif
57611+#ifdef CONFIG_GRKERNSEC_LINK
57612+ grsec_enable_link = 1;
57613+#endif
57614+#ifdef CONFIG_GRKERNSEC_BRUTE
57615+ grsec_enable_brute = 1;
57616+#endif
57617+#ifdef CONFIG_GRKERNSEC_DMESG
57618+ grsec_enable_dmesg = 1;
57619+#endif
57620+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57621+ grsec_enable_blackhole = 1;
57622+ grsec_lastack_retries = 4;
57623+#endif
57624+#ifdef CONFIG_GRKERNSEC_FIFO
57625+ grsec_enable_fifo = 1;
57626+#endif
57627+#ifdef CONFIG_GRKERNSEC_EXECLOG
57628+ grsec_enable_execlog = 1;
57629+#endif
57630+#ifdef CONFIG_GRKERNSEC_SETXID
57631+ grsec_enable_setxid = 1;
57632+#endif
57633+#ifdef CONFIG_GRKERNSEC_SIGNAL
57634+ grsec_enable_signal = 1;
57635+#endif
57636+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57637+ grsec_enable_forkfail = 1;
57638+#endif
57639+#ifdef CONFIG_GRKERNSEC_TIME
57640+ grsec_enable_time = 1;
57641+#endif
57642+#ifdef CONFIG_GRKERNSEC_RESLOG
57643+ grsec_resource_logging = 1;
57644+#endif
57645+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57646+ grsec_enable_chroot_findtask = 1;
57647+#endif
57648+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57649+ grsec_enable_chroot_unix = 1;
57650+#endif
57651+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57652+ grsec_enable_chroot_mount = 1;
57653+#endif
57654+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57655+ grsec_enable_chroot_fchdir = 1;
57656+#endif
57657+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57658+ grsec_enable_chroot_shmat = 1;
57659+#endif
57660+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57661+ grsec_enable_audit_ptrace = 1;
57662+#endif
57663+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57664+ grsec_enable_chroot_double = 1;
57665+#endif
57666+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57667+ grsec_enable_chroot_pivot = 1;
57668+#endif
57669+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57670+ grsec_enable_chroot_chdir = 1;
57671+#endif
57672+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57673+ grsec_enable_chroot_chmod = 1;
57674+#endif
57675+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57676+ grsec_enable_chroot_mknod = 1;
57677+#endif
57678+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57679+ grsec_enable_chroot_nice = 1;
57680+#endif
57681+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57682+ grsec_enable_chroot_execlog = 1;
57683+#endif
57684+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57685+ grsec_enable_chroot_caps = 1;
57686+#endif
57687+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57688+ grsec_enable_chroot_sysctl = 1;
57689+#endif
57690+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57691+ grsec_enable_symlinkown = 1;
57692+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
57693+#endif
57694+#ifdef CONFIG_GRKERNSEC_TPE
57695+ grsec_enable_tpe = 1;
57696+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57697+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57698+ grsec_enable_tpe_all = 1;
57699+#endif
57700+#endif
57701+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57702+ grsec_enable_socket_all = 1;
57703+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57704+#endif
57705+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57706+ grsec_enable_socket_client = 1;
57707+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57708+#endif
57709+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57710+ grsec_enable_socket_server = 1;
57711+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57712+#endif
57713+#endif
57714+
57715+ return;
57716+}
57717diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57718new file mode 100644
57719index 0000000..35a96d1
57720--- /dev/null
57721+++ b/grsecurity/grsec_link.c
57722@@ -0,0 +1,59 @@
57723+#include <linux/kernel.h>
57724+#include <linux/sched.h>
57725+#include <linux/fs.h>
57726+#include <linux/file.h>
57727+#include <linux/grinternal.h>
57728+
57729+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
57730+{
57731+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57732+ const struct inode *link_inode = link->dentry->d_inode;
57733+
57734+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
57735+ /* ignore root-owned links, e.g. /proc/self */
57736+ link_inode->i_uid &&
57737+ link_inode->i_uid != target->i_uid) {
57738+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
57739+ return 1;
57740+ }
57741+#endif
57742+ return 0;
57743+}
57744+
57745+int
57746+gr_handle_follow_link(const struct inode *parent,
57747+ const struct inode *inode,
57748+ const struct dentry *dentry, const struct vfsmount *mnt)
57749+{
57750+#ifdef CONFIG_GRKERNSEC_LINK
57751+ const struct cred *cred = current_cred();
57752+
57753+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57754+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57755+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57756+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57757+ return -EACCES;
57758+ }
57759+#endif
57760+ return 0;
57761+}
57762+
57763+int
57764+gr_handle_hardlink(const struct dentry *dentry,
57765+ const struct vfsmount *mnt,
57766+ struct inode *inode, const int mode, const char *to)
57767+{
57768+#ifdef CONFIG_GRKERNSEC_LINK
57769+ const struct cred *cred = current_cred();
57770+
57771+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57772+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57773+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57774+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57775+ !capable(CAP_FOWNER) && cred->uid) {
57776+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57777+ return -EPERM;
57778+ }
57779+#endif
57780+ return 0;
57781+}
57782diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57783new file mode 100644
57784index 0000000..a45d2e9
57785--- /dev/null
57786+++ b/grsecurity/grsec_log.c
57787@@ -0,0 +1,322 @@
57788+#include <linux/kernel.h>
57789+#include <linux/sched.h>
57790+#include <linux/file.h>
57791+#include <linux/tty.h>
57792+#include <linux/fs.h>
57793+#include <linux/grinternal.h>
57794+
57795+#ifdef CONFIG_TREE_PREEMPT_RCU
57796+#define DISABLE_PREEMPT() preempt_disable()
57797+#define ENABLE_PREEMPT() preempt_enable()
57798+#else
57799+#define DISABLE_PREEMPT()
57800+#define ENABLE_PREEMPT()
57801+#endif
57802+
57803+#define BEGIN_LOCKS(x) \
57804+ DISABLE_PREEMPT(); \
57805+ rcu_read_lock(); \
57806+ read_lock(&tasklist_lock); \
57807+ read_lock(&grsec_exec_file_lock); \
57808+ if (x != GR_DO_AUDIT) \
57809+ spin_lock(&grsec_alert_lock); \
57810+ else \
57811+ spin_lock(&grsec_audit_lock)
57812+
57813+#define END_LOCKS(x) \
57814+ if (x != GR_DO_AUDIT) \
57815+ spin_unlock(&grsec_alert_lock); \
57816+ else \
57817+ spin_unlock(&grsec_audit_lock); \
57818+ read_unlock(&grsec_exec_file_lock); \
57819+ read_unlock(&tasklist_lock); \
57820+ rcu_read_unlock(); \
57821+ ENABLE_PREEMPT(); \
57822+ if (x == GR_DONT_AUDIT) \
57823+ gr_handle_alertkill(current)
57824+
57825+enum {
57826+ FLOODING,
57827+ NO_FLOODING
57828+};
57829+
57830+extern char *gr_alert_log_fmt;
57831+extern char *gr_audit_log_fmt;
57832+extern char *gr_alert_log_buf;
57833+extern char *gr_audit_log_buf;
57834+
57835+static int gr_log_start(int audit)
57836+{
57837+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57838+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57839+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57840+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57841+ unsigned long curr_secs = get_seconds();
57842+
57843+ if (audit == GR_DO_AUDIT)
57844+ goto set_fmt;
57845+
57846+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57847+ grsec_alert_wtime = curr_secs;
57848+ grsec_alert_fyet = 0;
57849+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57850+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57851+ grsec_alert_fyet++;
57852+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57853+ grsec_alert_wtime = curr_secs;
57854+ grsec_alert_fyet++;
57855+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57856+ return FLOODING;
57857+ }
57858+ else return FLOODING;
57859+
57860+set_fmt:
57861+#endif
57862+ memset(buf, 0, PAGE_SIZE);
57863+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
57864+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57865+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57866+ } else if (current->signal->curr_ip) {
57867+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57868+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57869+ } else if (gr_acl_is_enabled()) {
57870+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57871+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57872+ } else {
57873+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57874+ strcpy(buf, fmt);
57875+ }
57876+
57877+ return NO_FLOODING;
57878+}
57879+
57880+static void gr_log_middle(int audit, const char *msg, va_list ap)
57881+ __attribute__ ((format (printf, 2, 0)));
57882+
57883+static void gr_log_middle(int audit, const char *msg, va_list ap)
57884+{
57885+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57886+ unsigned int len = strlen(buf);
57887+
57888+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57889+
57890+ return;
57891+}
57892+
57893+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57894+ __attribute__ ((format (printf, 2, 3)));
57895+
57896+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57897+{
57898+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57899+ unsigned int len = strlen(buf);
57900+ va_list ap;
57901+
57902+ va_start(ap, msg);
57903+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57904+ va_end(ap);
57905+
57906+ return;
57907+}
57908+
57909+static void gr_log_end(int audit, int append_default)
57910+{
57911+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57912+
57913+ if (append_default) {
57914+ unsigned int len = strlen(buf);
57915+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57916+ }
57917+
57918+ printk("%s\n", buf);
57919+
57920+ return;
57921+}
57922+
57923+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57924+{
57925+ int logtype;
57926+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57927+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57928+ void *voidptr = NULL;
57929+ int num1 = 0, num2 = 0;
57930+ unsigned long ulong1 = 0, ulong2 = 0;
57931+ struct dentry *dentry = NULL;
57932+ struct vfsmount *mnt = NULL;
57933+ struct file *file = NULL;
57934+ struct task_struct *task = NULL;
57935+ const struct cred *cred, *pcred;
57936+ va_list ap;
57937+
57938+ BEGIN_LOCKS(audit);
57939+ logtype = gr_log_start(audit);
57940+ if (logtype == FLOODING) {
57941+ END_LOCKS(audit);
57942+ return;
57943+ }
57944+ va_start(ap, argtypes);
57945+ switch (argtypes) {
57946+ case GR_TTYSNIFF:
57947+ task = va_arg(ap, struct task_struct *);
57948+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57949+ break;
57950+ case GR_SYSCTL_HIDDEN:
57951+ str1 = va_arg(ap, char *);
57952+ gr_log_middle_varargs(audit, msg, result, str1);
57953+ break;
57954+ case GR_RBAC:
57955+ dentry = va_arg(ap, struct dentry *);
57956+ mnt = va_arg(ap, struct vfsmount *);
57957+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57958+ break;
57959+ case GR_RBAC_STR:
57960+ dentry = va_arg(ap, struct dentry *);
57961+ mnt = va_arg(ap, struct vfsmount *);
57962+ str1 = va_arg(ap, char *);
57963+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57964+ break;
57965+ case GR_STR_RBAC:
57966+ str1 = va_arg(ap, char *);
57967+ dentry = va_arg(ap, struct dentry *);
57968+ mnt = va_arg(ap, struct vfsmount *);
57969+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57970+ break;
57971+ case GR_RBAC_MODE2:
57972+ dentry = va_arg(ap, struct dentry *);
57973+ mnt = va_arg(ap, struct vfsmount *);
57974+ str1 = va_arg(ap, char *);
57975+ str2 = va_arg(ap, char *);
57976+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57977+ break;
57978+ case GR_RBAC_MODE3:
57979+ dentry = va_arg(ap, struct dentry *);
57980+ mnt = va_arg(ap, struct vfsmount *);
57981+ str1 = va_arg(ap, char *);
57982+ str2 = va_arg(ap, char *);
57983+ str3 = va_arg(ap, char *);
57984+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57985+ break;
57986+ case GR_FILENAME:
57987+ dentry = va_arg(ap, struct dentry *);
57988+ mnt = va_arg(ap, struct vfsmount *);
57989+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57990+ break;
57991+ case GR_STR_FILENAME:
57992+ str1 = va_arg(ap, char *);
57993+ dentry = va_arg(ap, struct dentry *);
57994+ mnt = va_arg(ap, struct vfsmount *);
57995+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57996+ break;
57997+ case GR_FILENAME_STR:
57998+ dentry = va_arg(ap, struct dentry *);
57999+ mnt = va_arg(ap, struct vfsmount *);
58000+ str1 = va_arg(ap, char *);
58001+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58002+ break;
58003+ case GR_FILENAME_TWO_INT:
58004+ dentry = va_arg(ap, struct dentry *);
58005+ mnt = va_arg(ap, struct vfsmount *);
58006+ num1 = va_arg(ap, int);
58007+ num2 = va_arg(ap, int);
58008+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58009+ break;
58010+ case GR_FILENAME_TWO_INT_STR:
58011+ dentry = va_arg(ap, struct dentry *);
58012+ mnt = va_arg(ap, struct vfsmount *);
58013+ num1 = va_arg(ap, int);
58014+ num2 = va_arg(ap, int);
58015+ str1 = va_arg(ap, char *);
58016+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58017+ break;
58018+ case GR_TEXTREL:
58019+ file = va_arg(ap, struct file *);
58020+ ulong1 = va_arg(ap, unsigned long);
58021+ ulong2 = va_arg(ap, unsigned long);
58022+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58023+ break;
58024+ case GR_PTRACE:
58025+ task = va_arg(ap, struct task_struct *);
58026+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58027+ break;
58028+ case GR_RESOURCE:
58029+ task = va_arg(ap, struct task_struct *);
58030+ cred = __task_cred(task);
58031+ pcred = __task_cred(task->real_parent);
58032+ ulong1 = va_arg(ap, unsigned long);
58033+ str1 = va_arg(ap, char *);
58034+ ulong2 = va_arg(ap, unsigned long);
58035+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58036+ break;
58037+ case GR_CAP:
58038+ task = va_arg(ap, struct task_struct *);
58039+ cred = __task_cred(task);
58040+ pcred = __task_cred(task->real_parent);
58041+ str1 = va_arg(ap, char *);
58042+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58043+ break;
58044+ case GR_SIG:
58045+ str1 = va_arg(ap, char *);
58046+ voidptr = va_arg(ap, void *);
58047+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58048+ break;
58049+ case GR_SIG2:
58050+ task = va_arg(ap, struct task_struct *);
58051+ cred = __task_cred(task);
58052+ pcred = __task_cred(task->real_parent);
58053+ num1 = va_arg(ap, int);
58054+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58055+ break;
58056+ case GR_CRASH1:
58057+ task = va_arg(ap, struct task_struct *);
58058+ cred = __task_cred(task);
58059+ pcred = __task_cred(task->real_parent);
58060+ ulong1 = va_arg(ap, unsigned long);
58061+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58062+ break;
58063+ case GR_CRASH2:
58064+ task = va_arg(ap, struct task_struct *);
58065+ cred = __task_cred(task);
58066+ pcred = __task_cred(task->real_parent);
58067+ ulong1 = va_arg(ap, unsigned long);
58068+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58069+ break;
58070+ case GR_RWXMAP:
58071+ file = va_arg(ap, struct file *);
58072+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58073+ break;
58074+ case GR_PSACCT:
58075+ {
58076+ unsigned int wday, cday;
58077+ __u8 whr, chr;
58078+ __u8 wmin, cmin;
58079+ __u8 wsec, csec;
58080+ char cur_tty[64] = { 0 };
58081+ char parent_tty[64] = { 0 };
58082+
58083+ task = va_arg(ap, struct task_struct *);
58084+ wday = va_arg(ap, unsigned int);
58085+ cday = va_arg(ap, unsigned int);
58086+ whr = va_arg(ap, int);
58087+ chr = va_arg(ap, int);
58088+ wmin = va_arg(ap, int);
58089+ cmin = va_arg(ap, int);
58090+ wsec = va_arg(ap, int);
58091+ csec = va_arg(ap, int);
58092+ ulong1 = va_arg(ap, unsigned long);
58093+ cred = __task_cred(task);
58094+ pcred = __task_cred(task->real_parent);
58095+
58096+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58097+ }
58098+ break;
58099+ default:
58100+ gr_log_middle(audit, msg, ap);
58101+ }
58102+ va_end(ap);
58103+ // these don't need DEFAULTSECARGS printed on the end
58104+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58105+ gr_log_end(audit, 0);
58106+ else
58107+ gr_log_end(audit, 1);
58108+ END_LOCKS(audit);
58109+}
58110diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58111new file mode 100644
58112index 0000000..f536303
58113--- /dev/null
58114+++ b/grsecurity/grsec_mem.c
58115@@ -0,0 +1,40 @@
58116+#include <linux/kernel.h>
58117+#include <linux/sched.h>
58118+#include <linux/mm.h>
58119+#include <linux/mman.h>
58120+#include <linux/grinternal.h>
58121+
58122+void
58123+gr_handle_ioperm(void)
58124+{
58125+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58126+ return;
58127+}
58128+
58129+void
58130+gr_handle_iopl(void)
58131+{
58132+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58133+ return;
58134+}
58135+
58136+void
58137+gr_handle_mem_readwrite(u64 from, u64 to)
58138+{
58139+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58140+ return;
58141+}
58142+
58143+void
58144+gr_handle_vm86(void)
58145+{
58146+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58147+ return;
58148+}
58149+
58150+void
58151+gr_log_badprocpid(const char *entry)
58152+{
58153+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58154+ return;
58155+}
58156diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58157new file mode 100644
58158index 0000000..2131422
58159--- /dev/null
58160+++ b/grsecurity/grsec_mount.c
58161@@ -0,0 +1,62 @@
58162+#include <linux/kernel.h>
58163+#include <linux/sched.h>
58164+#include <linux/mount.h>
58165+#include <linux/grsecurity.h>
58166+#include <linux/grinternal.h>
58167+
58168+void
58169+gr_log_remount(const char *devname, const int retval)
58170+{
58171+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58172+ if (grsec_enable_mount && (retval >= 0))
58173+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58174+#endif
58175+ return;
58176+}
58177+
58178+void
58179+gr_log_unmount(const char *devname, const int retval)
58180+{
58181+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58182+ if (grsec_enable_mount && (retval >= 0))
58183+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58184+#endif
58185+ return;
58186+}
58187+
58188+void
58189+gr_log_mount(const char *from, const char *to, const int retval)
58190+{
58191+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58192+ if (grsec_enable_mount && (retval >= 0))
58193+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58194+#endif
58195+ return;
58196+}
58197+
58198+int
58199+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58200+{
58201+#ifdef CONFIG_GRKERNSEC_ROFS
58202+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58203+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58204+ return -EPERM;
58205+ } else
58206+ return 0;
58207+#endif
58208+ return 0;
58209+}
58210+
58211+int
58212+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58213+{
58214+#ifdef CONFIG_GRKERNSEC_ROFS
58215+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58216+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58217+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58218+ return -EPERM;
58219+ } else
58220+ return 0;
58221+#endif
58222+ return 0;
58223+}
58224diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58225new file mode 100644
58226index 0000000..a3b12a0
58227--- /dev/null
58228+++ b/grsecurity/grsec_pax.c
58229@@ -0,0 +1,36 @@
58230+#include <linux/kernel.h>
58231+#include <linux/sched.h>
58232+#include <linux/mm.h>
58233+#include <linux/file.h>
58234+#include <linux/grinternal.h>
58235+#include <linux/grsecurity.h>
58236+
58237+void
58238+gr_log_textrel(struct vm_area_struct * vma)
58239+{
58240+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58241+ if (grsec_enable_audit_textrel)
58242+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58243+#endif
58244+ return;
58245+}
58246+
58247+void
58248+gr_log_rwxmmap(struct file *file)
58249+{
58250+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58251+ if (grsec_enable_log_rwxmaps)
58252+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58253+#endif
58254+ return;
58255+}
58256+
58257+void
58258+gr_log_rwxmprotect(struct file *file)
58259+{
58260+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58261+ if (grsec_enable_log_rwxmaps)
58262+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58263+#endif
58264+ return;
58265+}
58266diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58267new file mode 100644
58268index 0000000..f7f29aa
58269--- /dev/null
58270+++ b/grsecurity/grsec_ptrace.c
58271@@ -0,0 +1,30 @@
58272+#include <linux/kernel.h>
58273+#include <linux/sched.h>
58274+#include <linux/grinternal.h>
58275+#include <linux/security.h>
58276+
58277+void
58278+gr_audit_ptrace(struct task_struct *task)
58279+{
58280+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58281+ if (grsec_enable_audit_ptrace)
58282+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58283+#endif
58284+ return;
58285+}
58286+
58287+int
58288+gr_ptrace_readexec(struct file *file, int unsafe_flags)
58289+{
58290+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58291+ const struct dentry *dentry = file->f_path.dentry;
58292+ const struct vfsmount *mnt = file->f_path.mnt;
58293+
58294+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58295+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58296+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58297+ return -EACCES;
58298+ }
58299+#endif
58300+ return 0;
58301+}
58302diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58303new file mode 100644
58304index 0000000..7a5b2de
58305--- /dev/null
58306+++ b/grsecurity/grsec_sig.c
58307@@ -0,0 +1,207 @@
58308+#include <linux/kernel.h>
58309+#include <linux/sched.h>
58310+#include <linux/delay.h>
58311+#include <linux/grsecurity.h>
58312+#include <linux/grinternal.h>
58313+#include <linux/hardirq.h>
58314+
58315+char *signames[] = {
58316+ [SIGSEGV] = "Segmentation fault",
58317+ [SIGILL] = "Illegal instruction",
58318+ [SIGABRT] = "Abort",
58319+ [SIGBUS] = "Invalid alignment/Bus error"
58320+};
58321+
58322+void
58323+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58324+{
58325+#ifdef CONFIG_GRKERNSEC_SIGNAL
58326+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58327+ (sig == SIGABRT) || (sig == SIGBUS))) {
58328+ if (t->pid == current->pid) {
58329+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58330+ } else {
58331+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58332+ }
58333+ }
58334+#endif
58335+ return;
58336+}
58337+
58338+int
58339+gr_handle_signal(const struct task_struct *p, const int sig)
58340+{
58341+#ifdef CONFIG_GRKERNSEC
58342+ /* ignore the 0 signal for protected task checks */
58343+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58344+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58345+ return -EPERM;
58346+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58347+ return -EPERM;
58348+ }
58349+#endif
58350+ return 0;
58351+}
58352+
58353+#ifdef CONFIG_GRKERNSEC
58354+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58355+
58356+int gr_fake_force_sig(int sig, struct task_struct *t)
58357+{
58358+ unsigned long int flags;
58359+ int ret, blocked, ignored;
58360+ struct k_sigaction *action;
58361+
58362+ spin_lock_irqsave(&t->sighand->siglock, flags);
58363+ action = &t->sighand->action[sig-1];
58364+ ignored = action->sa.sa_handler == SIG_IGN;
58365+ blocked = sigismember(&t->blocked, sig);
58366+ if (blocked || ignored) {
58367+ action->sa.sa_handler = SIG_DFL;
58368+ if (blocked) {
58369+ sigdelset(&t->blocked, sig);
58370+ recalc_sigpending_and_wake(t);
58371+ }
58372+ }
58373+ if (action->sa.sa_handler == SIG_DFL)
58374+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
58375+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58376+
58377+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
58378+
58379+ return ret;
58380+}
58381+#endif
58382+
58383+#ifdef CONFIG_GRKERNSEC_BRUTE
58384+#define GR_USER_BAN_TIME (15 * 60)
58385+
58386+static int __get_dumpable(unsigned long mm_flags)
58387+{
58388+ int ret;
58389+
58390+ ret = mm_flags & MMF_DUMPABLE_MASK;
58391+ return (ret >= 2) ? 2 : ret;
58392+}
58393+#endif
58394+
58395+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58396+{
58397+#ifdef CONFIG_GRKERNSEC_BRUTE
58398+ uid_t uid = 0;
58399+
58400+ if (!grsec_enable_brute)
58401+ return;
58402+
58403+ rcu_read_lock();
58404+ read_lock(&tasklist_lock);
58405+ read_lock(&grsec_exec_file_lock);
58406+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58407+ p->real_parent->brute = 1;
58408+ else {
58409+ const struct cred *cred = __task_cred(p), *cred2;
58410+ struct task_struct *tsk, *tsk2;
58411+
58412+ if (!__get_dumpable(mm_flags) && cred->uid) {
58413+ struct user_struct *user;
58414+
58415+ uid = cred->uid;
58416+
58417+ /* this is put upon execution past expiration */
58418+ user = find_user(uid);
58419+ if (user == NULL)
58420+ goto unlock;
58421+ user->banned = 1;
58422+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58423+ if (user->ban_expires == ~0UL)
58424+ user->ban_expires--;
58425+
58426+ do_each_thread(tsk2, tsk) {
58427+ cred2 = __task_cred(tsk);
58428+ if (tsk != p && cred2->uid == uid)
58429+ gr_fake_force_sig(SIGKILL, tsk);
58430+ } while_each_thread(tsk2, tsk);
58431+ }
58432+ }
58433+unlock:
58434+ read_unlock(&grsec_exec_file_lock);
58435+ read_unlock(&tasklist_lock);
58436+ rcu_read_unlock();
58437+
58438+ if (uid)
58439+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58440+
58441+#endif
58442+ return;
58443+}
58444+
58445+void gr_handle_brute_check(void)
58446+{
58447+#ifdef CONFIG_GRKERNSEC_BRUTE
58448+ if (current->brute)
58449+ msleep(30 * 1000);
58450+#endif
58451+ return;
58452+}
58453+
58454+void gr_handle_kernel_exploit(void)
58455+{
58456+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58457+ const struct cred *cred;
58458+ struct task_struct *tsk, *tsk2;
58459+ struct user_struct *user;
58460+ uid_t uid;
58461+
58462+ if (in_irq() || in_serving_softirq() || in_nmi())
58463+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58464+
58465+ uid = current_uid();
58466+
58467+ if (uid == 0)
58468+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58469+ else {
58470+ /* kill all the processes of this user, hold a reference
58471+ to their creds struct, and prevent them from creating
58472+ another process until system reset
58473+ */
58474+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58475+ /* we intentionally leak this ref */
58476+ user = get_uid(current->cred->user);
58477+ if (user) {
58478+ user->banned = 1;
58479+ user->ban_expires = ~0UL;
58480+ }
58481+
58482+ read_lock(&tasklist_lock);
58483+ do_each_thread(tsk2, tsk) {
58484+ cred = __task_cred(tsk);
58485+ if (cred->uid == uid)
58486+ gr_fake_force_sig(SIGKILL, tsk);
58487+ } while_each_thread(tsk2, tsk);
58488+ read_unlock(&tasklist_lock);
58489+ }
58490+#endif
58491+}
58492+
58493+int __gr_process_user_ban(struct user_struct *user)
58494+{
58495+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58496+ if (unlikely(user->banned)) {
58497+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58498+ user->banned = 0;
58499+ user->ban_expires = 0;
58500+ free_uid(user);
58501+ } else
58502+ return -EPERM;
58503+ }
58504+#endif
58505+ return 0;
58506+}
58507+
58508+int gr_process_user_ban(void)
58509+{
58510+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58511+ return __gr_process_user_ban(current->cred->user);
58512+#endif
58513+ return 0;
58514+}
58515diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58516new file mode 100644
58517index 0000000..4030d57
58518--- /dev/null
58519+++ b/grsecurity/grsec_sock.c
58520@@ -0,0 +1,244 @@
58521+#include <linux/kernel.h>
58522+#include <linux/module.h>
58523+#include <linux/sched.h>
58524+#include <linux/file.h>
58525+#include <linux/net.h>
58526+#include <linux/in.h>
58527+#include <linux/ip.h>
58528+#include <net/sock.h>
58529+#include <net/inet_sock.h>
58530+#include <linux/grsecurity.h>
58531+#include <linux/grinternal.h>
58532+#include <linux/gracl.h>
58533+
58534+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58535+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58536+
58537+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58538+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58539+
58540+#ifdef CONFIG_UNIX_MODULE
58541+EXPORT_SYMBOL(gr_acl_handle_unix);
58542+EXPORT_SYMBOL(gr_acl_handle_mknod);
58543+EXPORT_SYMBOL(gr_handle_chroot_unix);
58544+EXPORT_SYMBOL(gr_handle_create);
58545+#endif
58546+
58547+#ifdef CONFIG_GRKERNSEC
58548+#define gr_conn_table_size 32749
58549+struct conn_table_entry {
58550+ struct conn_table_entry *next;
58551+ struct signal_struct *sig;
58552+};
58553+
58554+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58555+DEFINE_SPINLOCK(gr_conn_table_lock);
58556+
58557+extern const char * gr_socktype_to_name(unsigned char type);
58558+extern const char * gr_proto_to_name(unsigned char proto);
58559+extern const char * gr_sockfamily_to_name(unsigned char family);
58560+
58561+static __inline__ int
58562+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58563+{
58564+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58565+}
58566+
58567+static __inline__ int
58568+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58569+ __u16 sport, __u16 dport)
58570+{
58571+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58572+ sig->gr_sport == sport && sig->gr_dport == dport))
58573+ return 1;
58574+ else
58575+ return 0;
58576+}
58577+
58578+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58579+{
58580+ struct conn_table_entry **match;
58581+ unsigned int index;
58582+
58583+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58584+ sig->gr_sport, sig->gr_dport,
58585+ gr_conn_table_size);
58586+
58587+ newent->sig = sig;
58588+
58589+ match = &gr_conn_table[index];
58590+ newent->next = *match;
58591+ *match = newent;
58592+
58593+ return;
58594+}
58595+
58596+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58597+{
58598+ struct conn_table_entry *match, *last = NULL;
58599+ unsigned int index;
58600+
58601+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58602+ sig->gr_sport, sig->gr_dport,
58603+ gr_conn_table_size);
58604+
58605+ match = gr_conn_table[index];
58606+ while (match && !conn_match(match->sig,
58607+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58608+ sig->gr_dport)) {
58609+ last = match;
58610+ match = match->next;
58611+ }
58612+
58613+ if (match) {
58614+ if (last)
58615+ last->next = match->next;
58616+ else
58617+ gr_conn_table[index] = NULL;
58618+ kfree(match);
58619+ }
58620+
58621+ return;
58622+}
58623+
58624+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58625+ __u16 sport, __u16 dport)
58626+{
58627+ struct conn_table_entry *match;
58628+ unsigned int index;
58629+
58630+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58631+
58632+ match = gr_conn_table[index];
58633+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58634+ match = match->next;
58635+
58636+ if (match)
58637+ return match->sig;
58638+ else
58639+ return NULL;
58640+}
58641+
58642+#endif
58643+
58644+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58645+{
58646+#ifdef CONFIG_GRKERNSEC
58647+ struct signal_struct *sig = task->signal;
58648+ struct conn_table_entry *newent;
58649+
58650+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58651+ if (newent == NULL)
58652+ return;
58653+ /* no bh lock needed since we are called with bh disabled */
58654+ spin_lock(&gr_conn_table_lock);
58655+ gr_del_task_from_ip_table_nolock(sig);
58656+ sig->gr_saddr = inet->inet_rcv_saddr;
58657+ sig->gr_daddr = inet->inet_daddr;
58658+ sig->gr_sport = inet->inet_sport;
58659+ sig->gr_dport = inet->inet_dport;
58660+ gr_add_to_task_ip_table_nolock(sig, newent);
58661+ spin_unlock(&gr_conn_table_lock);
58662+#endif
58663+ return;
58664+}
58665+
58666+void gr_del_task_from_ip_table(struct task_struct *task)
58667+{
58668+#ifdef CONFIG_GRKERNSEC
58669+ spin_lock_bh(&gr_conn_table_lock);
58670+ gr_del_task_from_ip_table_nolock(task->signal);
58671+ spin_unlock_bh(&gr_conn_table_lock);
58672+#endif
58673+ return;
58674+}
58675+
58676+void
58677+gr_attach_curr_ip(const struct sock *sk)
58678+{
58679+#ifdef CONFIG_GRKERNSEC
58680+ struct signal_struct *p, *set;
58681+ const struct inet_sock *inet = inet_sk(sk);
58682+
58683+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58684+ return;
58685+
58686+ set = current->signal;
58687+
58688+ spin_lock_bh(&gr_conn_table_lock);
58689+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58690+ inet->inet_dport, inet->inet_sport);
58691+ if (unlikely(p != NULL)) {
58692+ set->curr_ip = p->curr_ip;
58693+ set->used_accept = 1;
58694+ gr_del_task_from_ip_table_nolock(p);
58695+ spin_unlock_bh(&gr_conn_table_lock);
58696+ return;
58697+ }
58698+ spin_unlock_bh(&gr_conn_table_lock);
58699+
58700+ set->curr_ip = inet->inet_daddr;
58701+ set->used_accept = 1;
58702+#endif
58703+ return;
58704+}
58705+
58706+int
58707+gr_handle_sock_all(const int family, const int type, const int protocol)
58708+{
58709+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58710+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58711+ (family != AF_UNIX)) {
58712+ if (family == AF_INET)
58713+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58714+ else
58715+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58716+ return -EACCES;
58717+ }
58718+#endif
58719+ return 0;
58720+}
58721+
58722+int
58723+gr_handle_sock_server(const struct sockaddr *sck)
58724+{
58725+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58726+ if (grsec_enable_socket_server &&
58727+ in_group_p(grsec_socket_server_gid) &&
58728+ sck && (sck->sa_family != AF_UNIX) &&
58729+ (sck->sa_family != AF_LOCAL)) {
58730+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58731+ return -EACCES;
58732+ }
58733+#endif
58734+ return 0;
58735+}
58736+
58737+int
58738+gr_handle_sock_server_other(const struct sock *sck)
58739+{
58740+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58741+ if (grsec_enable_socket_server &&
58742+ in_group_p(grsec_socket_server_gid) &&
58743+ sck && (sck->sk_family != AF_UNIX) &&
58744+ (sck->sk_family != AF_LOCAL)) {
58745+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58746+ return -EACCES;
58747+ }
58748+#endif
58749+ return 0;
58750+}
58751+
58752+int
58753+gr_handle_sock_client(const struct sockaddr *sck)
58754+{
58755+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58756+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58757+ sck && (sck->sa_family != AF_UNIX) &&
58758+ (sck->sa_family != AF_LOCAL)) {
58759+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58760+ return -EACCES;
58761+ }
58762+#endif
58763+ return 0;
58764+}
58765diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58766new file mode 100644
58767index 0000000..f55ef0f
58768--- /dev/null
58769+++ b/grsecurity/grsec_sysctl.c
58770@@ -0,0 +1,469 @@
58771+#include <linux/kernel.h>
58772+#include <linux/sched.h>
58773+#include <linux/sysctl.h>
58774+#include <linux/grsecurity.h>
58775+#include <linux/grinternal.h>
58776+
58777+int
58778+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58779+{
58780+#ifdef CONFIG_GRKERNSEC_SYSCTL
58781+ if (dirname == NULL || name == NULL)
58782+ return 0;
58783+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58784+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58785+ return -EACCES;
58786+ }
58787+#endif
58788+ return 0;
58789+}
58790+
58791+#ifdef CONFIG_GRKERNSEC_ROFS
58792+static int __maybe_unused one = 1;
58793+#endif
58794+
58795+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58796+struct ctl_table grsecurity_table[] = {
58797+#ifdef CONFIG_GRKERNSEC_SYSCTL
58798+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58799+#ifdef CONFIG_GRKERNSEC_IO
58800+ {
58801+ .procname = "disable_priv_io",
58802+ .data = &grsec_disable_privio,
58803+ .maxlen = sizeof(int),
58804+ .mode = 0600,
58805+ .proc_handler = &proc_dointvec,
58806+ },
58807+#endif
58808+#endif
58809+#ifdef CONFIG_GRKERNSEC_LINK
58810+ {
58811+ .procname = "linking_restrictions",
58812+ .data = &grsec_enable_link,
58813+ .maxlen = sizeof(int),
58814+ .mode = 0600,
58815+ .proc_handler = &proc_dointvec,
58816+ },
58817+#endif
58818+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58819+ {
58820+ .procname = "enforce_symlinksifowner",
58821+ .data = &grsec_enable_symlinkown,
58822+ .maxlen = sizeof(int),
58823+ .mode = 0600,
58824+ .proc_handler = &proc_dointvec,
58825+ },
58826+ {
58827+ .procname = "symlinkown_gid",
58828+ .data = &grsec_symlinkown_gid,
58829+ .maxlen = sizeof(int),
58830+ .mode = 0600,
58831+ .proc_handler = &proc_dointvec,
58832+ },
58833+#endif
58834+#ifdef CONFIG_GRKERNSEC_BRUTE
58835+ {
58836+ .procname = "deter_bruteforce",
58837+ .data = &grsec_enable_brute,
58838+ .maxlen = sizeof(int),
58839+ .mode = 0600,
58840+ .proc_handler = &proc_dointvec,
58841+ },
58842+#endif
58843+#ifdef CONFIG_GRKERNSEC_FIFO
58844+ {
58845+ .procname = "fifo_restrictions",
58846+ .data = &grsec_enable_fifo,
58847+ .maxlen = sizeof(int),
58848+ .mode = 0600,
58849+ .proc_handler = &proc_dointvec,
58850+ },
58851+#endif
58852+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58853+ {
58854+ .procname = "ptrace_readexec",
58855+ .data = &grsec_enable_ptrace_readexec,
58856+ .maxlen = sizeof(int),
58857+ .mode = 0600,
58858+ .proc_handler = &proc_dointvec,
58859+ },
58860+#endif
58861+#ifdef CONFIG_GRKERNSEC_SETXID
58862+ {
58863+ .procname = "consistent_setxid",
58864+ .data = &grsec_enable_setxid,
58865+ .maxlen = sizeof(int),
58866+ .mode = 0600,
58867+ .proc_handler = &proc_dointvec,
58868+ },
58869+#endif
58870+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58871+ {
58872+ .procname = "ip_blackhole",
58873+ .data = &grsec_enable_blackhole,
58874+ .maxlen = sizeof(int),
58875+ .mode = 0600,
58876+ .proc_handler = &proc_dointvec,
58877+ },
58878+ {
58879+ .procname = "lastack_retries",
58880+ .data = &grsec_lastack_retries,
58881+ .maxlen = sizeof(int),
58882+ .mode = 0600,
58883+ .proc_handler = &proc_dointvec,
58884+ },
58885+#endif
58886+#ifdef CONFIG_GRKERNSEC_EXECLOG
58887+ {
58888+ .procname = "exec_logging",
58889+ .data = &grsec_enable_execlog,
58890+ .maxlen = sizeof(int),
58891+ .mode = 0600,
58892+ .proc_handler = &proc_dointvec,
58893+ },
58894+#endif
58895+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58896+ {
58897+ .procname = "rwxmap_logging",
58898+ .data = &grsec_enable_log_rwxmaps,
58899+ .maxlen = sizeof(int),
58900+ .mode = 0600,
58901+ .proc_handler = &proc_dointvec,
58902+ },
58903+#endif
58904+#ifdef CONFIG_GRKERNSEC_SIGNAL
58905+ {
58906+ .procname = "signal_logging",
58907+ .data = &grsec_enable_signal,
58908+ .maxlen = sizeof(int),
58909+ .mode = 0600,
58910+ .proc_handler = &proc_dointvec,
58911+ },
58912+#endif
58913+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58914+ {
58915+ .procname = "forkfail_logging",
58916+ .data = &grsec_enable_forkfail,
58917+ .maxlen = sizeof(int),
58918+ .mode = 0600,
58919+ .proc_handler = &proc_dointvec,
58920+ },
58921+#endif
58922+#ifdef CONFIG_GRKERNSEC_TIME
58923+ {
58924+ .procname = "timechange_logging",
58925+ .data = &grsec_enable_time,
58926+ .maxlen = sizeof(int),
58927+ .mode = 0600,
58928+ .proc_handler = &proc_dointvec,
58929+ },
58930+#endif
58931+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58932+ {
58933+ .procname = "chroot_deny_shmat",
58934+ .data = &grsec_enable_chroot_shmat,
58935+ .maxlen = sizeof(int),
58936+ .mode = 0600,
58937+ .proc_handler = &proc_dointvec,
58938+ },
58939+#endif
58940+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58941+ {
58942+ .procname = "chroot_deny_unix",
58943+ .data = &grsec_enable_chroot_unix,
58944+ .maxlen = sizeof(int),
58945+ .mode = 0600,
58946+ .proc_handler = &proc_dointvec,
58947+ },
58948+#endif
58949+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58950+ {
58951+ .procname = "chroot_deny_mount",
58952+ .data = &grsec_enable_chroot_mount,
58953+ .maxlen = sizeof(int),
58954+ .mode = 0600,
58955+ .proc_handler = &proc_dointvec,
58956+ },
58957+#endif
58958+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58959+ {
58960+ .procname = "chroot_deny_fchdir",
58961+ .data = &grsec_enable_chroot_fchdir,
58962+ .maxlen = sizeof(int),
58963+ .mode = 0600,
58964+ .proc_handler = &proc_dointvec,
58965+ },
58966+#endif
58967+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58968+ {
58969+ .procname = "chroot_deny_chroot",
58970+ .data = &grsec_enable_chroot_double,
58971+ .maxlen = sizeof(int),
58972+ .mode = 0600,
58973+ .proc_handler = &proc_dointvec,
58974+ },
58975+#endif
58976+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58977+ {
58978+ .procname = "chroot_deny_pivot",
58979+ .data = &grsec_enable_chroot_pivot,
58980+ .maxlen = sizeof(int),
58981+ .mode = 0600,
58982+ .proc_handler = &proc_dointvec,
58983+ },
58984+#endif
58985+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58986+ {
58987+ .procname = "chroot_enforce_chdir",
58988+ .data = &grsec_enable_chroot_chdir,
58989+ .maxlen = sizeof(int),
58990+ .mode = 0600,
58991+ .proc_handler = &proc_dointvec,
58992+ },
58993+#endif
58994+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58995+ {
58996+ .procname = "chroot_deny_chmod",
58997+ .data = &grsec_enable_chroot_chmod,
58998+ .maxlen = sizeof(int),
58999+ .mode = 0600,
59000+ .proc_handler = &proc_dointvec,
59001+ },
59002+#endif
59003+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59004+ {
59005+ .procname = "chroot_deny_mknod",
59006+ .data = &grsec_enable_chroot_mknod,
59007+ .maxlen = sizeof(int),
59008+ .mode = 0600,
59009+ .proc_handler = &proc_dointvec,
59010+ },
59011+#endif
59012+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59013+ {
59014+ .procname = "chroot_restrict_nice",
59015+ .data = &grsec_enable_chroot_nice,
59016+ .maxlen = sizeof(int),
59017+ .mode = 0600,
59018+ .proc_handler = &proc_dointvec,
59019+ },
59020+#endif
59021+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59022+ {
59023+ .procname = "chroot_execlog",
59024+ .data = &grsec_enable_chroot_execlog,
59025+ .maxlen = sizeof(int),
59026+ .mode = 0600,
59027+ .proc_handler = &proc_dointvec,
59028+ },
59029+#endif
59030+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59031+ {
59032+ .procname = "chroot_caps",
59033+ .data = &grsec_enable_chroot_caps,
59034+ .maxlen = sizeof(int),
59035+ .mode = 0600,
59036+ .proc_handler = &proc_dointvec,
59037+ },
59038+#endif
59039+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59040+ {
59041+ .procname = "chroot_deny_sysctl",
59042+ .data = &grsec_enable_chroot_sysctl,
59043+ .maxlen = sizeof(int),
59044+ .mode = 0600,
59045+ .proc_handler = &proc_dointvec,
59046+ },
59047+#endif
59048+#ifdef CONFIG_GRKERNSEC_TPE
59049+ {
59050+ .procname = "tpe",
59051+ .data = &grsec_enable_tpe,
59052+ .maxlen = sizeof(int),
59053+ .mode = 0600,
59054+ .proc_handler = &proc_dointvec,
59055+ },
59056+ {
59057+ .procname = "tpe_gid",
59058+ .data = &grsec_tpe_gid,
59059+ .maxlen = sizeof(int),
59060+ .mode = 0600,
59061+ .proc_handler = &proc_dointvec,
59062+ },
59063+#endif
59064+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59065+ {
59066+ .procname = "tpe_invert",
59067+ .data = &grsec_enable_tpe_invert,
59068+ .maxlen = sizeof(int),
59069+ .mode = 0600,
59070+ .proc_handler = &proc_dointvec,
59071+ },
59072+#endif
59073+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59074+ {
59075+ .procname = "tpe_restrict_all",
59076+ .data = &grsec_enable_tpe_all,
59077+ .maxlen = sizeof(int),
59078+ .mode = 0600,
59079+ .proc_handler = &proc_dointvec,
59080+ },
59081+#endif
59082+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59083+ {
59084+ .procname = "socket_all",
59085+ .data = &grsec_enable_socket_all,
59086+ .maxlen = sizeof(int),
59087+ .mode = 0600,
59088+ .proc_handler = &proc_dointvec,
59089+ },
59090+ {
59091+ .procname = "socket_all_gid",
59092+ .data = &grsec_socket_all_gid,
59093+ .maxlen = sizeof(int),
59094+ .mode = 0600,
59095+ .proc_handler = &proc_dointvec,
59096+ },
59097+#endif
59098+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59099+ {
59100+ .procname = "socket_client",
59101+ .data = &grsec_enable_socket_client,
59102+ .maxlen = sizeof(int),
59103+ .mode = 0600,
59104+ .proc_handler = &proc_dointvec,
59105+ },
59106+ {
59107+ .procname = "socket_client_gid",
59108+ .data = &grsec_socket_client_gid,
59109+ .maxlen = sizeof(int),
59110+ .mode = 0600,
59111+ .proc_handler = &proc_dointvec,
59112+ },
59113+#endif
59114+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59115+ {
59116+ .procname = "socket_server",
59117+ .data = &grsec_enable_socket_server,
59118+ .maxlen = sizeof(int),
59119+ .mode = 0600,
59120+ .proc_handler = &proc_dointvec,
59121+ },
59122+ {
59123+ .procname = "socket_server_gid",
59124+ .data = &grsec_socket_server_gid,
59125+ .maxlen = sizeof(int),
59126+ .mode = 0600,
59127+ .proc_handler = &proc_dointvec,
59128+ },
59129+#endif
59130+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59131+ {
59132+ .procname = "audit_group",
59133+ .data = &grsec_enable_group,
59134+ .maxlen = sizeof(int),
59135+ .mode = 0600,
59136+ .proc_handler = &proc_dointvec,
59137+ },
59138+ {
59139+ .procname = "audit_gid",
59140+ .data = &grsec_audit_gid,
59141+ .maxlen = sizeof(int),
59142+ .mode = 0600,
59143+ .proc_handler = &proc_dointvec,
59144+ },
59145+#endif
59146+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59147+ {
59148+ .procname = "audit_chdir",
59149+ .data = &grsec_enable_chdir,
59150+ .maxlen = sizeof(int),
59151+ .mode = 0600,
59152+ .proc_handler = &proc_dointvec,
59153+ },
59154+#endif
59155+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59156+ {
59157+ .procname = "audit_mount",
59158+ .data = &grsec_enable_mount,
59159+ .maxlen = sizeof(int),
59160+ .mode = 0600,
59161+ .proc_handler = &proc_dointvec,
59162+ },
59163+#endif
59164+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59165+ {
59166+ .procname = "audit_textrel",
59167+ .data = &grsec_enable_audit_textrel,
59168+ .maxlen = sizeof(int),
59169+ .mode = 0600,
59170+ .proc_handler = &proc_dointvec,
59171+ },
59172+#endif
59173+#ifdef CONFIG_GRKERNSEC_DMESG
59174+ {
59175+ .procname = "dmesg",
59176+ .data = &grsec_enable_dmesg,
59177+ .maxlen = sizeof(int),
59178+ .mode = 0600,
59179+ .proc_handler = &proc_dointvec,
59180+ },
59181+#endif
59182+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59183+ {
59184+ .procname = "chroot_findtask",
59185+ .data = &grsec_enable_chroot_findtask,
59186+ .maxlen = sizeof(int),
59187+ .mode = 0600,
59188+ .proc_handler = &proc_dointvec,
59189+ },
59190+#endif
59191+#ifdef CONFIG_GRKERNSEC_RESLOG
59192+ {
59193+ .procname = "resource_logging",
59194+ .data = &grsec_resource_logging,
59195+ .maxlen = sizeof(int),
59196+ .mode = 0600,
59197+ .proc_handler = &proc_dointvec,
59198+ },
59199+#endif
59200+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59201+ {
59202+ .procname = "audit_ptrace",
59203+ .data = &grsec_enable_audit_ptrace,
59204+ .maxlen = sizeof(int),
59205+ .mode = 0600,
59206+ .proc_handler = &proc_dointvec,
59207+ },
59208+#endif
59209+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59210+ {
59211+ .procname = "harden_ptrace",
59212+ .data = &grsec_enable_harden_ptrace,
59213+ .maxlen = sizeof(int),
59214+ .mode = 0600,
59215+ .proc_handler = &proc_dointvec,
59216+ },
59217+#endif
59218+ {
59219+ .procname = "grsec_lock",
59220+ .data = &grsec_lock,
59221+ .maxlen = sizeof(int),
59222+ .mode = 0600,
59223+ .proc_handler = &proc_dointvec,
59224+ },
59225+#endif
59226+#ifdef CONFIG_GRKERNSEC_ROFS
59227+ {
59228+ .procname = "romount_protect",
59229+ .data = &grsec_enable_rofs,
59230+ .maxlen = sizeof(int),
59231+ .mode = 0600,
59232+ .proc_handler = &proc_dointvec_minmax,
59233+ .extra1 = &one,
59234+ .extra2 = &one,
59235+ },
59236+#endif
59237+ { }
59238+};
59239+#endif
59240diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59241new file mode 100644
59242index 0000000..0dc13c3
59243--- /dev/null
59244+++ b/grsecurity/grsec_time.c
59245@@ -0,0 +1,16 @@
59246+#include <linux/kernel.h>
59247+#include <linux/sched.h>
59248+#include <linux/grinternal.h>
59249+#include <linux/module.h>
59250+
59251+void
59252+gr_log_timechange(void)
59253+{
59254+#ifdef CONFIG_GRKERNSEC_TIME
59255+ if (grsec_enable_time)
59256+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59257+#endif
59258+ return;
59259+}
59260+
59261+EXPORT_SYMBOL(gr_log_timechange);
59262diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59263new file mode 100644
59264index 0000000..07e0dc0
59265--- /dev/null
59266+++ b/grsecurity/grsec_tpe.c
59267@@ -0,0 +1,73 @@
59268+#include <linux/kernel.h>
59269+#include <linux/sched.h>
59270+#include <linux/file.h>
59271+#include <linux/fs.h>
59272+#include <linux/grinternal.h>
59273+
59274+extern int gr_acl_tpe_check(void);
59275+
59276+int
59277+gr_tpe_allow(const struct file *file)
59278+{
59279+#ifdef CONFIG_GRKERNSEC
59280+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59281+ const struct cred *cred = current_cred();
59282+ char *msg = NULL;
59283+ char *msg2 = NULL;
59284+
59285+ // never restrict root
59286+ if (!cred->uid)
59287+ return 1;
59288+
59289+ if (grsec_enable_tpe) {
59290+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59291+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59292+ msg = "not being in trusted group";
59293+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59294+ msg = "being in untrusted group";
59295+#else
59296+ if (in_group_p(grsec_tpe_gid))
59297+ msg = "being in untrusted group";
59298+#endif
59299+ }
59300+ if (!msg && gr_acl_tpe_check())
59301+ msg = "being in untrusted role";
59302+
59303+ // not in any affected group/role
59304+ if (!msg)
59305+ goto next_check;
59306+
59307+ if (inode->i_uid)
59308+ msg2 = "file in non-root-owned directory";
59309+ else if (inode->i_mode & S_IWOTH)
59310+ msg2 = "file in world-writable directory";
59311+ else if (inode->i_mode & S_IWGRP)
59312+ msg2 = "file in group-writable directory";
59313+
59314+ if (msg && msg2) {
59315+ char fullmsg[70] = {0};
59316+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59317+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59318+ return 0;
59319+ }
59320+ msg = NULL;
59321+next_check:
59322+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59323+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59324+ return 1;
59325+
59326+ if (inode->i_uid && (inode->i_uid != cred->uid))
59327+ msg = "directory not owned by user";
59328+ else if (inode->i_mode & S_IWOTH)
59329+ msg = "file in world-writable directory";
59330+ else if (inode->i_mode & S_IWGRP)
59331+ msg = "file in group-writable directory";
59332+
59333+ if (msg) {
59334+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59335+ return 0;
59336+ }
59337+#endif
59338+#endif
59339+ return 1;
59340+}
59341diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59342new file mode 100644
59343index 0000000..9f7b1ac
59344--- /dev/null
59345+++ b/grsecurity/grsum.c
59346@@ -0,0 +1,61 @@
59347+#include <linux/err.h>
59348+#include <linux/kernel.h>
59349+#include <linux/sched.h>
59350+#include <linux/mm.h>
59351+#include <linux/scatterlist.h>
59352+#include <linux/crypto.h>
59353+#include <linux/gracl.h>
59354+
59355+
59356+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59357+#error "crypto and sha256 must be built into the kernel"
59358+#endif
59359+
59360+int
59361+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59362+{
59363+ char *p;
59364+ struct crypto_hash *tfm;
59365+ struct hash_desc desc;
59366+ struct scatterlist sg;
59367+ unsigned char temp_sum[GR_SHA_LEN];
59368+ volatile int retval = 0;
59369+ volatile int dummy = 0;
59370+ unsigned int i;
59371+
59372+ sg_init_table(&sg, 1);
59373+
59374+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59375+ if (IS_ERR(tfm)) {
59376+ /* should never happen, since sha256 should be built in */
59377+ return 1;
59378+ }
59379+
59380+ desc.tfm = tfm;
59381+ desc.flags = 0;
59382+
59383+ crypto_hash_init(&desc);
59384+
59385+ p = salt;
59386+ sg_set_buf(&sg, p, GR_SALT_LEN);
59387+ crypto_hash_update(&desc, &sg, sg.length);
59388+
59389+ p = entry->pw;
59390+ sg_set_buf(&sg, p, strlen(p));
59391+
59392+ crypto_hash_update(&desc, &sg, sg.length);
59393+
59394+ crypto_hash_final(&desc, temp_sum);
59395+
59396+ memset(entry->pw, 0, GR_PW_LEN);
59397+
59398+ for (i = 0; i < GR_SHA_LEN; i++)
59399+ if (sum[i] != temp_sum[i])
59400+ retval = 1;
59401+ else
59402+ dummy = 1; // waste a cycle
59403+
59404+ crypto_free_hash(tfm);
59405+
59406+ return retval;
59407+}
59408diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59409index f1c8ca6..b5c1cc7 100644
59410--- a/include/acpi/acpi_bus.h
59411+++ b/include/acpi/acpi_bus.h
59412@@ -107,7 +107,7 @@ struct acpi_device_ops {
59413 acpi_op_bind bind;
59414 acpi_op_unbind unbind;
59415 acpi_op_notify notify;
59416-};
59417+} __no_const;
59418
59419 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59420
59421diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59422index b7babf0..c1e2d45 100644
59423--- a/include/asm-generic/atomic-long.h
59424+++ b/include/asm-generic/atomic-long.h
59425@@ -22,6 +22,12 @@
59426
59427 typedef atomic64_t atomic_long_t;
59428
59429+#ifdef CONFIG_PAX_REFCOUNT
59430+typedef atomic64_unchecked_t atomic_long_unchecked_t;
59431+#else
59432+typedef atomic64_t atomic_long_unchecked_t;
59433+#endif
59434+
59435 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59436
59437 static inline long atomic_long_read(atomic_long_t *l)
59438@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59439 return (long)atomic64_read(v);
59440 }
59441
59442+#ifdef CONFIG_PAX_REFCOUNT
59443+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59444+{
59445+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59446+
59447+ return (long)atomic64_read_unchecked(v);
59448+}
59449+#endif
59450+
59451 static inline void atomic_long_set(atomic_long_t *l, long i)
59452 {
59453 atomic64_t *v = (atomic64_t *)l;
59454@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59455 atomic64_set(v, i);
59456 }
59457
59458+#ifdef CONFIG_PAX_REFCOUNT
59459+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59460+{
59461+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59462+
59463+ atomic64_set_unchecked(v, i);
59464+}
59465+#endif
59466+
59467 static inline void atomic_long_inc(atomic_long_t *l)
59468 {
59469 atomic64_t *v = (atomic64_t *)l;
59470@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59471 atomic64_inc(v);
59472 }
59473
59474+#ifdef CONFIG_PAX_REFCOUNT
59475+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59476+{
59477+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59478+
59479+ atomic64_inc_unchecked(v);
59480+}
59481+#endif
59482+
59483 static inline void atomic_long_dec(atomic_long_t *l)
59484 {
59485 atomic64_t *v = (atomic64_t *)l;
59486@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59487 atomic64_dec(v);
59488 }
59489
59490+#ifdef CONFIG_PAX_REFCOUNT
59491+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59492+{
59493+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59494+
59495+ atomic64_dec_unchecked(v);
59496+}
59497+#endif
59498+
59499 static inline void atomic_long_add(long i, atomic_long_t *l)
59500 {
59501 atomic64_t *v = (atomic64_t *)l;
59502@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59503 atomic64_add(i, v);
59504 }
59505
59506+#ifdef CONFIG_PAX_REFCOUNT
59507+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59508+{
59509+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59510+
59511+ atomic64_add_unchecked(i, v);
59512+}
59513+#endif
59514+
59515 static inline void atomic_long_sub(long i, atomic_long_t *l)
59516 {
59517 atomic64_t *v = (atomic64_t *)l;
59518@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59519 atomic64_sub(i, v);
59520 }
59521
59522+#ifdef CONFIG_PAX_REFCOUNT
59523+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59524+{
59525+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59526+
59527+ atomic64_sub_unchecked(i, v);
59528+}
59529+#endif
59530+
59531 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59532 {
59533 atomic64_t *v = (atomic64_t *)l;
59534@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59535 return (long)atomic64_inc_return(v);
59536 }
59537
59538+#ifdef CONFIG_PAX_REFCOUNT
59539+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59540+{
59541+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59542+
59543+ return (long)atomic64_inc_return_unchecked(v);
59544+}
59545+#endif
59546+
59547 static inline long atomic_long_dec_return(atomic_long_t *l)
59548 {
59549 atomic64_t *v = (atomic64_t *)l;
59550@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59551
59552 typedef atomic_t atomic_long_t;
59553
59554+#ifdef CONFIG_PAX_REFCOUNT
59555+typedef atomic_unchecked_t atomic_long_unchecked_t;
59556+#else
59557+typedef atomic_t atomic_long_unchecked_t;
59558+#endif
59559+
59560 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59561 static inline long atomic_long_read(atomic_long_t *l)
59562 {
59563@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59564 return (long)atomic_read(v);
59565 }
59566
59567+#ifdef CONFIG_PAX_REFCOUNT
59568+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59569+{
59570+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59571+
59572+ return (long)atomic_read_unchecked(v);
59573+}
59574+#endif
59575+
59576 static inline void atomic_long_set(atomic_long_t *l, long i)
59577 {
59578 atomic_t *v = (atomic_t *)l;
59579@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59580 atomic_set(v, i);
59581 }
59582
59583+#ifdef CONFIG_PAX_REFCOUNT
59584+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59585+{
59586+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59587+
59588+ atomic_set_unchecked(v, i);
59589+}
59590+#endif
59591+
59592 static inline void atomic_long_inc(atomic_long_t *l)
59593 {
59594 atomic_t *v = (atomic_t *)l;
59595@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59596 atomic_inc(v);
59597 }
59598
59599+#ifdef CONFIG_PAX_REFCOUNT
59600+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59601+{
59602+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59603+
59604+ atomic_inc_unchecked(v);
59605+}
59606+#endif
59607+
59608 static inline void atomic_long_dec(atomic_long_t *l)
59609 {
59610 atomic_t *v = (atomic_t *)l;
59611@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59612 atomic_dec(v);
59613 }
59614
59615+#ifdef CONFIG_PAX_REFCOUNT
59616+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59617+{
59618+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59619+
59620+ atomic_dec_unchecked(v);
59621+}
59622+#endif
59623+
59624 static inline void atomic_long_add(long i, atomic_long_t *l)
59625 {
59626 atomic_t *v = (atomic_t *)l;
59627@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59628 atomic_add(i, v);
59629 }
59630
59631+#ifdef CONFIG_PAX_REFCOUNT
59632+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59633+{
59634+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59635+
59636+ atomic_add_unchecked(i, v);
59637+}
59638+#endif
59639+
59640 static inline void atomic_long_sub(long i, atomic_long_t *l)
59641 {
59642 atomic_t *v = (atomic_t *)l;
59643@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59644 atomic_sub(i, v);
59645 }
59646
59647+#ifdef CONFIG_PAX_REFCOUNT
59648+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59649+{
59650+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59651+
59652+ atomic_sub_unchecked(i, v);
59653+}
59654+#endif
59655+
59656 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59657 {
59658 atomic_t *v = (atomic_t *)l;
59659@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59660 return (long)atomic_inc_return(v);
59661 }
59662
59663+#ifdef CONFIG_PAX_REFCOUNT
59664+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59665+{
59666+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59667+
59668+ return (long)atomic_inc_return_unchecked(v);
59669+}
59670+#endif
59671+
59672 static inline long atomic_long_dec_return(atomic_long_t *l)
59673 {
59674 atomic_t *v = (atomic_t *)l;
59675@@ -255,4 +393,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59676
59677 #endif /* BITS_PER_LONG == 64 */
59678
59679+#ifdef CONFIG_PAX_REFCOUNT
59680+static inline void pax_refcount_needs_these_functions(void)
59681+{
59682+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59683+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59684+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59685+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59686+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59687+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59688+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59689+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59690+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59691+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59692+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59693+ atomic_clear_mask_unchecked(0, NULL);
59694+ atomic_set_mask_unchecked(0, NULL);
59695+
59696+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59697+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59698+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59699+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59700+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59701+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59702+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59703+}
59704+#else
59705+#define atomic_read_unchecked(v) atomic_read(v)
59706+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59707+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59708+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59709+#define atomic_inc_unchecked(v) atomic_inc(v)
59710+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59711+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59712+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59713+#define atomic_dec_unchecked(v) atomic_dec(v)
59714+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59715+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59716+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
59717+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
59718+
59719+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59720+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59721+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59722+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59723+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59724+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59725+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59726+#endif
59727+
59728 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59729diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
59730index 1ced641..c896ee8 100644
59731--- a/include/asm-generic/atomic.h
59732+++ b/include/asm-generic/atomic.h
59733@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
59734 * Atomically clears the bits set in @mask from @v
59735 */
59736 #ifndef atomic_clear_mask
59737-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
59738+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
59739 {
59740 unsigned long flags;
59741
59742diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59743index b18ce4f..2ee2843 100644
59744--- a/include/asm-generic/atomic64.h
59745+++ b/include/asm-generic/atomic64.h
59746@@ -16,6 +16,8 @@ typedef struct {
59747 long long counter;
59748 } atomic64_t;
59749
59750+typedef atomic64_t atomic64_unchecked_t;
59751+
59752 #define ATOMIC64_INIT(i) { (i) }
59753
59754 extern long long atomic64_read(const atomic64_t *v);
59755@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59756 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59757 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59758
59759+#define atomic64_read_unchecked(v) atomic64_read(v)
59760+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59761+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59762+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59763+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59764+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59765+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59766+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59767+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59768+
59769 #endif /* _ASM_GENERIC_ATOMIC64_H */
59770diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59771index 1bfcfe5..e04c5c9 100644
59772--- a/include/asm-generic/cache.h
59773+++ b/include/asm-generic/cache.h
59774@@ -6,7 +6,7 @@
59775 * cache lines need to provide their own cache.h.
59776 */
59777
59778-#define L1_CACHE_SHIFT 5
59779-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59780+#define L1_CACHE_SHIFT 5UL
59781+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59782
59783 #endif /* __ASM_GENERIC_CACHE_H */
59784diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59785index 0d68a1e..b74a761 100644
59786--- a/include/asm-generic/emergency-restart.h
59787+++ b/include/asm-generic/emergency-restart.h
59788@@ -1,7 +1,7 @@
59789 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59790 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59791
59792-static inline void machine_emergency_restart(void)
59793+static inline __noreturn void machine_emergency_restart(void)
59794 {
59795 machine_restart(NULL);
59796 }
59797diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59798index 0232ccb..13d9165 100644
59799--- a/include/asm-generic/kmap_types.h
59800+++ b/include/asm-generic/kmap_types.h
59801@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59802 KMAP_D(17) KM_NMI,
59803 KMAP_D(18) KM_NMI_PTE,
59804 KMAP_D(19) KM_KDB,
59805+KMAP_D(20) KM_CLEARPAGE,
59806 /*
59807 * Remember to update debug_kmap_atomic() when adding new kmap types!
59808 */
59809-KMAP_D(20) KM_TYPE_NR
59810+KMAP_D(21) KM_TYPE_NR
59811 };
59812
59813 #undef KMAP_D
59814diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59815index 9ceb03b..2efbcbd 100644
59816--- a/include/asm-generic/local.h
59817+++ b/include/asm-generic/local.h
59818@@ -39,6 +39,7 @@ typedef struct
59819 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59820 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59821 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59822+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59823
59824 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59825 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59826diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59827index 725612b..9cc513a 100644
59828--- a/include/asm-generic/pgtable-nopmd.h
59829+++ b/include/asm-generic/pgtable-nopmd.h
59830@@ -1,14 +1,19 @@
59831 #ifndef _PGTABLE_NOPMD_H
59832 #define _PGTABLE_NOPMD_H
59833
59834-#ifndef __ASSEMBLY__
59835-
59836 #include <asm-generic/pgtable-nopud.h>
59837
59838-struct mm_struct;
59839-
59840 #define __PAGETABLE_PMD_FOLDED
59841
59842+#define PMD_SHIFT PUD_SHIFT
59843+#define PTRS_PER_PMD 1
59844+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59845+#define PMD_MASK (~(PMD_SIZE-1))
59846+
59847+#ifndef __ASSEMBLY__
59848+
59849+struct mm_struct;
59850+
59851 /*
59852 * Having the pmd type consist of a pud gets the size right, and allows
59853 * us to conceptually access the pud entry that this pmd is folded into
59854@@ -16,11 +21,6 @@ struct mm_struct;
59855 */
59856 typedef struct { pud_t pud; } pmd_t;
59857
59858-#define PMD_SHIFT PUD_SHIFT
59859-#define PTRS_PER_PMD 1
59860-#define PMD_SIZE (1UL << PMD_SHIFT)
59861-#define PMD_MASK (~(PMD_SIZE-1))
59862-
59863 /*
59864 * The "pud_xxx()" functions here are trivial for a folded two-level
59865 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59866diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59867index 810431d..0ec4804f 100644
59868--- a/include/asm-generic/pgtable-nopud.h
59869+++ b/include/asm-generic/pgtable-nopud.h
59870@@ -1,10 +1,15 @@
59871 #ifndef _PGTABLE_NOPUD_H
59872 #define _PGTABLE_NOPUD_H
59873
59874-#ifndef __ASSEMBLY__
59875-
59876 #define __PAGETABLE_PUD_FOLDED
59877
59878+#define PUD_SHIFT PGDIR_SHIFT
59879+#define PTRS_PER_PUD 1
59880+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59881+#define PUD_MASK (~(PUD_SIZE-1))
59882+
59883+#ifndef __ASSEMBLY__
59884+
59885 /*
59886 * Having the pud type consist of a pgd gets the size right, and allows
59887 * us to conceptually access the pgd entry that this pud is folded into
59888@@ -12,11 +17,6 @@
59889 */
59890 typedef struct { pgd_t pgd; } pud_t;
59891
59892-#define PUD_SHIFT PGDIR_SHIFT
59893-#define PTRS_PER_PUD 1
59894-#define PUD_SIZE (1UL << PUD_SHIFT)
59895-#define PUD_MASK (~(PUD_SIZE-1))
59896-
59897 /*
59898 * The "pgd_xxx()" functions here are trivial for a folded two-level
59899 * setup: the pud is never bad, and a pud always exists (as it's folded
59900@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59901 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59902
59903 #define pgd_populate(mm, pgd, pud) do { } while (0)
59904+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59905 /*
59906 * (puds are folded into pgds so this doesn't get actually called,
59907 * but the define is needed for a generic inline function.)
59908diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59909index c7ec2cd..909d125 100644
59910--- a/include/asm-generic/pgtable.h
59911+++ b/include/asm-generic/pgtable.h
59912@@ -531,6 +531,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59913 #endif
59914 }
59915
59916+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59917+static inline unsigned long pax_open_kernel(void) { return 0; }
59918+#endif
59919+
59920+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59921+static inline unsigned long pax_close_kernel(void) { return 0; }
59922+#endif
59923+
59924 #endif /* CONFIG_MMU */
59925
59926 #endif /* !__ASSEMBLY__ */
59927diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59928index 8aeadf6..f1dc019 100644
59929--- a/include/asm-generic/vmlinux.lds.h
59930+++ b/include/asm-generic/vmlinux.lds.h
59931@@ -218,6 +218,7 @@
59932 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59933 VMLINUX_SYMBOL(__start_rodata) = .; \
59934 *(.rodata) *(.rodata.*) \
59935+ *(.data..read_only) \
59936 *(__vermagic) /* Kernel version magic */ \
59937 . = ALIGN(8); \
59938 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59939@@ -716,17 +717,18 @@
59940 * section in the linker script will go there too. @phdr should have
59941 * a leading colon.
59942 *
59943- * Note that this macros defines __per_cpu_load as an absolute symbol.
59944+ * Note that this macros defines per_cpu_load as an absolute symbol.
59945 * If there is no need to put the percpu section at a predetermined
59946 * address, use PERCPU_SECTION.
59947 */
59948 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59949- VMLINUX_SYMBOL(__per_cpu_load) = .; \
59950- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59951+ per_cpu_load = .; \
59952+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59953 - LOAD_OFFSET) { \
59954+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59955 PERCPU_INPUT(cacheline) \
59956 } phdr \
59957- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59958+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59959
59960 /**
59961 * PERCPU_SECTION - define output section for percpu area, simple version
59962diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59963index dd73104..fde86bd 100644
59964--- a/include/drm/drmP.h
59965+++ b/include/drm/drmP.h
59966@@ -72,6 +72,7 @@
59967 #include <linux/workqueue.h>
59968 #include <linux/poll.h>
59969 #include <asm/pgalloc.h>
59970+#include <asm/local.h>
59971 #include "drm.h"
59972
59973 #include <linux/idr.h>
59974@@ -1074,7 +1075,7 @@ struct drm_device {
59975
59976 /** \name Usage Counters */
59977 /*@{ */
59978- int open_count; /**< Outstanding files open */
59979+ local_t open_count; /**< Outstanding files open */
59980 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59981 atomic_t vma_count; /**< Outstanding vma areas open */
59982 int buf_use; /**< Buffers in use -- cannot alloc */
59983@@ -1085,7 +1086,7 @@ struct drm_device {
59984 /*@{ */
59985 unsigned long counters;
59986 enum drm_stat_type types[15];
59987- atomic_t counts[15];
59988+ atomic_unchecked_t counts[15];
59989 /*@} */
59990
59991 struct list_head filelist;
59992diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59993index 37515d1..34fa8b0 100644
59994--- a/include/drm/drm_crtc_helper.h
59995+++ b/include/drm/drm_crtc_helper.h
59996@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59997
59998 /* disable crtc when not in use - more explicit than dpms off */
59999 void (*disable)(struct drm_crtc *crtc);
60000-};
60001+} __no_const;
60002
60003 struct drm_encoder_helper_funcs {
60004 void (*dpms)(struct drm_encoder *encoder, int mode);
60005@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60006 struct drm_connector *connector);
60007 /* disable encoder when not in use - more explicit than dpms off */
60008 void (*disable)(struct drm_encoder *encoder);
60009-};
60010+} __no_const;
60011
60012 struct drm_connector_helper_funcs {
60013 int (*get_modes)(struct drm_connector *connector);
60014diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60015index d6d1da4..fdd1ac5 100644
60016--- a/include/drm/ttm/ttm_memory.h
60017+++ b/include/drm/ttm/ttm_memory.h
60018@@ -48,7 +48,7 @@
60019
60020 struct ttm_mem_shrink {
60021 int (*do_shrink) (struct ttm_mem_shrink *);
60022-};
60023+} __no_const;
60024
60025 /**
60026 * struct ttm_mem_global - Global memory accounting structure.
60027diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60028index e86dfca..40cc55f 100644
60029--- a/include/linux/a.out.h
60030+++ b/include/linux/a.out.h
60031@@ -39,6 +39,14 @@ enum machine_type {
60032 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60033 };
60034
60035+/* Constants for the N_FLAGS field */
60036+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60037+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60038+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60039+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60040+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60041+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60042+
60043 #if !defined (N_MAGIC)
60044 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60045 #endif
60046diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60047index 06fd4bb..1caec0d 100644
60048--- a/include/linux/atmdev.h
60049+++ b/include/linux/atmdev.h
60050@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60051 #endif
60052
60053 struct k_atm_aal_stats {
60054-#define __HANDLE_ITEM(i) atomic_t i
60055+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60056 __AAL_STAT_ITEMS
60057 #undef __HANDLE_ITEM
60058 };
60059diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60060index 366422b..1fa7f84 100644
60061--- a/include/linux/binfmts.h
60062+++ b/include/linux/binfmts.h
60063@@ -89,6 +89,7 @@ struct linux_binfmt {
60064 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60065 int (*load_shlib)(struct file *);
60066 int (*core_dump)(struct coredump_params *cprm);
60067+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60068 unsigned long min_coredump; /* minimal dump size */
60069 };
60070
60071diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60072index 4d4ac24..2c3ccce 100644
60073--- a/include/linux/blkdev.h
60074+++ b/include/linux/blkdev.h
60075@@ -1376,7 +1376,7 @@ struct block_device_operations {
60076 /* this callback is with swap_lock and sometimes page table lock held */
60077 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60078 struct module *owner;
60079-};
60080+} __do_const;
60081
60082 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60083 unsigned long);
60084diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60085index 4d1a074..88f929a 100644
60086--- a/include/linux/blktrace_api.h
60087+++ b/include/linux/blktrace_api.h
60088@@ -162,7 +162,7 @@ struct blk_trace {
60089 struct dentry *dir;
60090 struct dentry *dropped_file;
60091 struct dentry *msg_file;
60092- atomic_t dropped;
60093+ atomic_unchecked_t dropped;
60094 };
60095
60096 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60097diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60098index 83195fb..0b0f77d 100644
60099--- a/include/linux/byteorder/little_endian.h
60100+++ b/include/linux/byteorder/little_endian.h
60101@@ -42,51 +42,51 @@
60102
60103 static inline __le64 __cpu_to_le64p(const __u64 *p)
60104 {
60105- return (__force __le64)*p;
60106+ return (__force const __le64)*p;
60107 }
60108 static inline __u64 __le64_to_cpup(const __le64 *p)
60109 {
60110- return (__force __u64)*p;
60111+ return (__force const __u64)*p;
60112 }
60113 static inline __le32 __cpu_to_le32p(const __u32 *p)
60114 {
60115- return (__force __le32)*p;
60116+ return (__force const __le32)*p;
60117 }
60118 static inline __u32 __le32_to_cpup(const __le32 *p)
60119 {
60120- return (__force __u32)*p;
60121+ return (__force const __u32)*p;
60122 }
60123 static inline __le16 __cpu_to_le16p(const __u16 *p)
60124 {
60125- return (__force __le16)*p;
60126+ return (__force const __le16)*p;
60127 }
60128 static inline __u16 __le16_to_cpup(const __le16 *p)
60129 {
60130- return (__force __u16)*p;
60131+ return (__force const __u16)*p;
60132 }
60133 static inline __be64 __cpu_to_be64p(const __u64 *p)
60134 {
60135- return (__force __be64)__swab64p(p);
60136+ return (__force const __be64)__swab64p(p);
60137 }
60138 static inline __u64 __be64_to_cpup(const __be64 *p)
60139 {
60140- return __swab64p((__u64 *)p);
60141+ return __swab64p((const __u64 *)p);
60142 }
60143 static inline __be32 __cpu_to_be32p(const __u32 *p)
60144 {
60145- return (__force __be32)__swab32p(p);
60146+ return (__force const __be32)__swab32p(p);
60147 }
60148 static inline __u32 __be32_to_cpup(const __be32 *p)
60149 {
60150- return __swab32p((__u32 *)p);
60151+ return __swab32p((const __u32 *)p);
60152 }
60153 static inline __be16 __cpu_to_be16p(const __u16 *p)
60154 {
60155- return (__force __be16)__swab16p(p);
60156+ return (__force const __be16)__swab16p(p);
60157 }
60158 static inline __u16 __be16_to_cpup(const __be16 *p)
60159 {
60160- return __swab16p((__u16 *)p);
60161+ return __swab16p((const __u16 *)p);
60162 }
60163 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60164 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60165diff --git a/include/linux/cache.h b/include/linux/cache.h
60166index 4c57065..4307975 100644
60167--- a/include/linux/cache.h
60168+++ b/include/linux/cache.h
60169@@ -16,6 +16,10 @@
60170 #define __read_mostly
60171 #endif
60172
60173+#ifndef __read_only
60174+#define __read_only __read_mostly
60175+#endif
60176+
60177 #ifndef ____cacheline_aligned
60178 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60179 #endif
60180diff --git a/include/linux/capability.h b/include/linux/capability.h
60181index 12d52de..b5f7fa7 100644
60182--- a/include/linux/capability.h
60183+++ b/include/linux/capability.h
60184@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60185 extern bool capable(int cap);
60186 extern bool ns_capable(struct user_namespace *ns, int cap);
60187 extern bool nsown_capable(int cap);
60188+extern bool capable_nolog(int cap);
60189+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60190
60191 /* audit system wants to get cap info from files as well */
60192 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60193diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60194index 42e55de..1cd0e66 100644
60195--- a/include/linux/cleancache.h
60196+++ b/include/linux/cleancache.h
60197@@ -31,7 +31,7 @@ struct cleancache_ops {
60198 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
60199 void (*invalidate_inode)(int, struct cleancache_filekey);
60200 void (*invalidate_fs)(int);
60201-};
60202+} __no_const;
60203
60204 extern struct cleancache_ops
60205 cleancache_register_ops(struct cleancache_ops *ops);
60206diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60207index 2f40791..a62d196 100644
60208--- a/include/linux/compiler-gcc4.h
60209+++ b/include/linux/compiler-gcc4.h
60210@@ -32,6 +32,16 @@
60211 #define __linktime_error(message) __attribute__((__error__(message)))
60212
60213 #if __GNUC_MINOR__ >= 5
60214+
60215+#ifdef CONSTIFY_PLUGIN
60216+#define __no_const __attribute__((no_const))
60217+#define __do_const __attribute__((do_const))
60218+#endif
60219+
60220+#ifdef SIZE_OVERFLOW_PLUGIN
60221+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60222+#endif
60223+
60224 /*
60225 * Mark a position in code as unreachable. This can be used to
60226 * suppress control flow warnings after asm blocks that transfer
60227@@ -47,6 +57,11 @@
60228 #define __noclone __attribute__((__noclone__))
60229
60230 #endif
60231+
60232+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60233+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60234+#define __bos0(ptr) __bos((ptr), 0)
60235+#define __bos1(ptr) __bos((ptr), 1)
60236 #endif
60237
60238 #if __GNUC_MINOR__ > 0
60239diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60240index 923d093..726c17f 100644
60241--- a/include/linux/compiler.h
60242+++ b/include/linux/compiler.h
60243@@ -5,31 +5,62 @@
60244
60245 #ifdef __CHECKER__
60246 # define __user __attribute__((noderef, address_space(1)))
60247+# define __force_user __force __user
60248 # define __kernel __attribute__((address_space(0)))
60249+# define __force_kernel __force __kernel
60250 # define __safe __attribute__((safe))
60251 # define __force __attribute__((force))
60252 # define __nocast __attribute__((nocast))
60253 # define __iomem __attribute__((noderef, address_space(2)))
60254+# define __force_iomem __force __iomem
60255 # define __acquires(x) __attribute__((context(x,0,1)))
60256 # define __releases(x) __attribute__((context(x,1,0)))
60257 # define __acquire(x) __context__(x,1)
60258 # define __release(x) __context__(x,-1)
60259 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60260 # define __percpu __attribute__((noderef, address_space(3)))
60261+# define __force_percpu __force __percpu
60262 #ifdef CONFIG_SPARSE_RCU_POINTER
60263 # define __rcu __attribute__((noderef, address_space(4)))
60264+# define __force_rcu __force __rcu
60265 #else
60266 # define __rcu
60267+# define __force_rcu
60268 #endif
60269 extern void __chk_user_ptr(const volatile void __user *);
60270 extern void __chk_io_ptr(const volatile void __iomem *);
60271+#elif defined(CHECKER_PLUGIN)
60272+//# define __user
60273+//# define __force_user
60274+//# define __kernel
60275+//# define __force_kernel
60276+# define __safe
60277+# define __force
60278+# define __nocast
60279+# define __iomem
60280+# define __force_iomem
60281+# define __chk_user_ptr(x) (void)0
60282+# define __chk_io_ptr(x) (void)0
60283+# define __builtin_warning(x, y...) (1)
60284+# define __acquires(x)
60285+# define __releases(x)
60286+# define __acquire(x) (void)0
60287+# define __release(x) (void)0
60288+# define __cond_lock(x,c) (c)
60289+# define __percpu
60290+# define __force_percpu
60291+# define __rcu
60292+# define __force_rcu
60293 #else
60294 # define __user
60295+# define __force_user
60296 # define __kernel
60297+# define __force_kernel
60298 # define __safe
60299 # define __force
60300 # define __nocast
60301 # define __iomem
60302+# define __force_iomem
60303 # define __chk_user_ptr(x) (void)0
60304 # define __chk_io_ptr(x) (void)0
60305 # define __builtin_warning(x, y...) (1)
60306@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60307 # define __release(x) (void)0
60308 # define __cond_lock(x,c) (c)
60309 # define __percpu
60310+# define __force_percpu
60311 # define __rcu
60312+# define __force_rcu
60313 #endif
60314
60315 #ifdef __KERNEL__
60316@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60317 # define __attribute_const__ /* unimplemented */
60318 #endif
60319
60320+#ifndef __no_const
60321+# define __no_const
60322+#endif
60323+
60324+#ifndef __do_const
60325+# define __do_const
60326+#endif
60327+
60328+#ifndef __size_overflow
60329+# define __size_overflow(...)
60330+#endif
60331+
60332 /*
60333 * Tell gcc if a function is cold. The compiler will assume any path
60334 * directly leading to the call is unlikely.
60335@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60336 #define __cold
60337 #endif
60338
60339+#ifndef __alloc_size
60340+#define __alloc_size(...)
60341+#endif
60342+
60343+#ifndef __bos
60344+#define __bos(ptr, arg)
60345+#endif
60346+
60347+#ifndef __bos0
60348+#define __bos0(ptr)
60349+#endif
60350+
60351+#ifndef __bos1
60352+#define __bos1(ptr)
60353+#endif
60354+
60355 /* Simple shorthand for a section definition */
60356 #ifndef __section
60357 # define __section(S) __attribute__ ((__section__(#S)))
60358@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60359 * use is to mediate communication between process-level code and irq/NMI
60360 * handlers, all running on the same CPU.
60361 */
60362-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60363+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60364+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60365
60366 #endif /* __LINUX_COMPILER_H */
60367diff --git a/include/linux/cred.h b/include/linux/cred.h
60368index adadf71..6af5560 100644
60369--- a/include/linux/cred.h
60370+++ b/include/linux/cred.h
60371@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60372 static inline void validate_process_creds(void)
60373 {
60374 }
60375+static inline void validate_task_creds(struct task_struct *task)
60376+{
60377+}
60378 #endif
60379
60380 /**
60381diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60382index b92eadf..b4ecdc1 100644
60383--- a/include/linux/crypto.h
60384+++ b/include/linux/crypto.h
60385@@ -373,7 +373,7 @@ struct cipher_tfm {
60386 const u8 *key, unsigned int keylen);
60387 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60388 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60389-};
60390+} __no_const;
60391
60392 struct hash_tfm {
60393 int (*init)(struct hash_desc *desc);
60394@@ -394,13 +394,13 @@ struct compress_tfm {
60395 int (*cot_decompress)(struct crypto_tfm *tfm,
60396 const u8 *src, unsigned int slen,
60397 u8 *dst, unsigned int *dlen);
60398-};
60399+} __no_const;
60400
60401 struct rng_tfm {
60402 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60403 unsigned int dlen);
60404 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60405-};
60406+} __no_const;
60407
60408 #define crt_ablkcipher crt_u.ablkcipher
60409 #define crt_aead crt_u.aead
60410diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60411index 7925bf0..d5143d2 100644
60412--- a/include/linux/decompress/mm.h
60413+++ b/include/linux/decompress/mm.h
60414@@ -77,7 +77,7 @@ static void free(void *where)
60415 * warnings when not needed (indeed large_malloc / large_free are not
60416 * needed by inflate */
60417
60418-#define malloc(a) kmalloc(a, GFP_KERNEL)
60419+#define malloc(a) kmalloc((a), GFP_KERNEL)
60420 #define free(a) kfree(a)
60421
60422 #define large_malloc(a) vmalloc(a)
60423diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60424index dfc099e..e583e66 100644
60425--- a/include/linux/dma-mapping.h
60426+++ b/include/linux/dma-mapping.h
60427@@ -51,7 +51,7 @@ struct dma_map_ops {
60428 u64 (*get_required_mask)(struct device *dev);
60429 #endif
60430 int is_phys;
60431-};
60432+} __do_const;
60433
60434 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60435
60436diff --git a/include/linux/efi.h b/include/linux/efi.h
60437index ec45ccd..9923c32 100644
60438--- a/include/linux/efi.h
60439+++ b/include/linux/efi.h
60440@@ -635,7 +635,7 @@ struct efivar_operations {
60441 efi_get_variable_t *get_variable;
60442 efi_get_next_variable_t *get_next_variable;
60443 efi_set_variable_t *set_variable;
60444-};
60445+} __no_const;
60446
60447 struct efivars {
60448 /*
60449diff --git a/include/linux/elf.h b/include/linux/elf.h
60450index 999b4f5..57753b4 100644
60451--- a/include/linux/elf.h
60452+++ b/include/linux/elf.h
60453@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60454 #define PT_GNU_EH_FRAME 0x6474e550
60455
60456 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60457+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60458+
60459+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60460+
60461+/* Constants for the e_flags field */
60462+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60463+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60464+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60465+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60466+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60467+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60468
60469 /*
60470 * Extended Numbering
60471@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60472 #define DT_DEBUG 21
60473 #define DT_TEXTREL 22
60474 #define DT_JMPREL 23
60475+#define DT_FLAGS 30
60476+ #define DF_TEXTREL 0x00000004
60477 #define DT_ENCODING 32
60478 #define OLD_DT_LOOS 0x60000000
60479 #define DT_LOOS 0x6000000d
60480@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60481 #define PF_W 0x2
60482 #define PF_X 0x1
60483
60484+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60485+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60486+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60487+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60488+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60489+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60490+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60491+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60492+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60493+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60494+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60495+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60496+
60497 typedef struct elf32_phdr{
60498 Elf32_Word p_type;
60499 Elf32_Off p_offset;
60500@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60501 #define EI_OSABI 7
60502 #define EI_PAD 8
60503
60504+#define EI_PAX 14
60505+
60506 #define ELFMAG0 0x7f /* EI_MAG */
60507 #define ELFMAG1 'E'
60508 #define ELFMAG2 'L'
60509@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60510 #define elf_note elf32_note
60511 #define elf_addr_t Elf32_Off
60512 #define Elf_Half Elf32_Half
60513+#define elf_dyn Elf32_Dyn
60514
60515 #else
60516
60517@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60518 #define elf_note elf64_note
60519 #define elf_addr_t Elf64_Off
60520 #define Elf_Half Elf64_Half
60521+#define elf_dyn Elf64_Dyn
60522
60523 #endif
60524
60525diff --git a/include/linux/filter.h b/include/linux/filter.h
60526index 8eeb205..d59bfa2 100644
60527--- a/include/linux/filter.h
60528+++ b/include/linux/filter.h
60529@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60530
60531 struct sk_buff;
60532 struct sock;
60533+struct bpf_jit_work;
60534
60535 struct sk_filter
60536 {
60537@@ -141,6 +142,9 @@ struct sk_filter
60538 unsigned int len; /* Number of filter blocks */
60539 unsigned int (*bpf_func)(const struct sk_buff *skb,
60540 const struct sock_filter *filter);
60541+#ifdef CONFIG_BPF_JIT
60542+ struct bpf_jit_work *work;
60543+#endif
60544 struct rcu_head rcu;
60545 struct sock_filter insns[0];
60546 };
60547diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60548index cdc9b71..ce69fb5 100644
60549--- a/include/linux/firewire.h
60550+++ b/include/linux/firewire.h
60551@@ -413,7 +413,7 @@ struct fw_iso_context {
60552 union {
60553 fw_iso_callback_t sc;
60554 fw_iso_mc_callback_t mc;
60555- } callback;
60556+ } __no_const callback;
60557 void *callback_data;
60558 };
60559
60560diff --git a/include/linux/fs.h b/include/linux/fs.h
60561index 25c40b9..1bfd4f4 100644
60562--- a/include/linux/fs.h
60563+++ b/include/linux/fs.h
60564@@ -1634,7 +1634,8 @@ struct file_operations {
60565 int (*setlease)(struct file *, long, struct file_lock **);
60566 long (*fallocate)(struct file *file, int mode, loff_t offset,
60567 loff_t len);
60568-};
60569+} __do_const;
60570+typedef struct file_operations __no_const file_operations_no_const;
60571
60572 struct inode_operations {
60573 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60574diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60575index 003dc0f..3c4ea97 100644
60576--- a/include/linux/fs_struct.h
60577+++ b/include/linux/fs_struct.h
60578@@ -6,7 +6,7 @@
60579 #include <linux/seqlock.h>
60580
60581 struct fs_struct {
60582- int users;
60583+ atomic_t users;
60584 spinlock_t lock;
60585 seqcount_t seq;
60586 int umask;
60587diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60588index ce31408..b1ad003 100644
60589--- a/include/linux/fscache-cache.h
60590+++ b/include/linux/fscache-cache.h
60591@@ -102,7 +102,7 @@ struct fscache_operation {
60592 fscache_operation_release_t release;
60593 };
60594
60595-extern atomic_t fscache_op_debug_id;
60596+extern atomic_unchecked_t fscache_op_debug_id;
60597 extern void fscache_op_work_func(struct work_struct *work);
60598
60599 extern void fscache_enqueue_operation(struct fscache_operation *);
60600@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60601 {
60602 INIT_WORK(&op->work, fscache_op_work_func);
60603 atomic_set(&op->usage, 1);
60604- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60605+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60606 op->processor = processor;
60607 op->release = release;
60608 INIT_LIST_HEAD(&op->pend_link);
60609diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60610index a6dfe69..569586df 100644
60611--- a/include/linux/fsnotify.h
60612+++ b/include/linux/fsnotify.h
60613@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60614 */
60615 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60616 {
60617- return kstrdup(name, GFP_KERNEL);
60618+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60619 }
60620
60621 /*
60622diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60623index 91d0e0a3..035666b 100644
60624--- a/include/linux/fsnotify_backend.h
60625+++ b/include/linux/fsnotify_backend.h
60626@@ -105,6 +105,7 @@ struct fsnotify_ops {
60627 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60628 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60629 };
60630+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60631
60632 /*
60633 * A group is a "thing" that wants to receive notification about filesystem
60634diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60635index 176a939..1462211 100644
60636--- a/include/linux/ftrace_event.h
60637+++ b/include/linux/ftrace_event.h
60638@@ -97,7 +97,7 @@ struct trace_event_functions {
60639 trace_print_func raw;
60640 trace_print_func hex;
60641 trace_print_func binary;
60642-};
60643+} __no_const;
60644
60645 struct trace_event {
60646 struct hlist_node node;
60647@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60648 extern int trace_add_event_call(struct ftrace_event_call *call);
60649 extern void trace_remove_event_call(struct ftrace_event_call *call);
60650
60651-#define is_signed_type(type) (((type)(-1)) < 0)
60652+#define is_signed_type(type) (((type)(-1)) < (type)1)
60653
60654 int trace_set_clr_event(const char *system, const char *event, int set);
60655
60656diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60657index 017a7fb..33a8507 100644
60658--- a/include/linux/genhd.h
60659+++ b/include/linux/genhd.h
60660@@ -185,7 +185,7 @@ struct gendisk {
60661 struct kobject *slave_dir;
60662
60663 struct timer_rand_state *random;
60664- atomic_t sync_io; /* RAID */
60665+ atomic_unchecked_t sync_io; /* RAID */
60666 struct disk_events *ev;
60667 #ifdef CONFIG_BLK_DEV_INTEGRITY
60668 struct blk_integrity *integrity;
60669diff --git a/include/linux/gfp.h b/include/linux/gfp.h
60670index 581e74b..8c34a24 100644
60671--- a/include/linux/gfp.h
60672+++ b/include/linux/gfp.h
60673@@ -38,6 +38,12 @@ struct vm_area_struct;
60674 #define ___GFP_OTHER_NODE 0x800000u
60675 #define ___GFP_WRITE 0x1000000u
60676
60677+#ifdef CONFIG_PAX_USERCOPY_SLABS
60678+#define ___GFP_USERCOPY 0x2000000u
60679+#else
60680+#define ___GFP_USERCOPY 0
60681+#endif
60682+
60683 /*
60684 * GFP bitmasks..
60685 *
60686@@ -87,6 +93,7 @@ struct vm_area_struct;
60687 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
60688 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
60689 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
60690+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
60691
60692 /*
60693 * This may seem redundant, but it's a way of annotating false positives vs.
60694@@ -94,7 +101,7 @@ struct vm_area_struct;
60695 */
60696 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
60697
60698-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
60699+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
60700 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
60701
60702 /* This equals 0, but use constants in case they ever change */
60703@@ -148,6 +155,8 @@ struct vm_area_struct;
60704 /* 4GB DMA on some platforms */
60705 #define GFP_DMA32 __GFP_DMA32
60706
60707+#define GFP_USERCOPY __GFP_USERCOPY
60708+
60709 /* Convert GFP flags to their corresponding migrate type */
60710 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
60711 {
60712diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60713new file mode 100644
60714index 0000000..c938b1f
60715--- /dev/null
60716+++ b/include/linux/gracl.h
60717@@ -0,0 +1,319 @@
60718+#ifndef GR_ACL_H
60719+#define GR_ACL_H
60720+
60721+#include <linux/grdefs.h>
60722+#include <linux/resource.h>
60723+#include <linux/capability.h>
60724+#include <linux/dcache.h>
60725+#include <asm/resource.h>
60726+
60727+/* Major status information */
60728+
60729+#define GR_VERSION "grsecurity 2.9.1"
60730+#define GRSECURITY_VERSION 0x2901
60731+
60732+enum {
60733+ GR_SHUTDOWN = 0,
60734+ GR_ENABLE = 1,
60735+ GR_SPROLE = 2,
60736+ GR_RELOAD = 3,
60737+ GR_SEGVMOD = 4,
60738+ GR_STATUS = 5,
60739+ GR_UNSPROLE = 6,
60740+ GR_PASSSET = 7,
60741+ GR_SPROLEPAM = 8,
60742+};
60743+
60744+/* Password setup definitions
60745+ * kernel/grhash.c */
60746+enum {
60747+ GR_PW_LEN = 128,
60748+ GR_SALT_LEN = 16,
60749+ GR_SHA_LEN = 32,
60750+};
60751+
60752+enum {
60753+ GR_SPROLE_LEN = 64,
60754+};
60755+
60756+enum {
60757+ GR_NO_GLOB = 0,
60758+ GR_REG_GLOB,
60759+ GR_CREATE_GLOB
60760+};
60761+
60762+#define GR_NLIMITS 32
60763+
60764+/* Begin Data Structures */
60765+
60766+struct sprole_pw {
60767+ unsigned char *rolename;
60768+ unsigned char salt[GR_SALT_LEN];
60769+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60770+};
60771+
60772+struct name_entry {
60773+ __u32 key;
60774+ ino_t inode;
60775+ dev_t device;
60776+ char *name;
60777+ __u16 len;
60778+ __u8 deleted;
60779+ struct name_entry *prev;
60780+ struct name_entry *next;
60781+};
60782+
60783+struct inodev_entry {
60784+ struct name_entry *nentry;
60785+ struct inodev_entry *prev;
60786+ struct inodev_entry *next;
60787+};
60788+
60789+struct acl_role_db {
60790+ struct acl_role_label **r_hash;
60791+ __u32 r_size;
60792+};
60793+
60794+struct inodev_db {
60795+ struct inodev_entry **i_hash;
60796+ __u32 i_size;
60797+};
60798+
60799+struct name_db {
60800+ struct name_entry **n_hash;
60801+ __u32 n_size;
60802+};
60803+
60804+struct crash_uid {
60805+ uid_t uid;
60806+ unsigned long expires;
60807+};
60808+
60809+struct gr_hash_struct {
60810+ void **table;
60811+ void **nametable;
60812+ void *first;
60813+ __u32 table_size;
60814+ __u32 used_size;
60815+ int type;
60816+};
60817+
60818+/* Userspace Grsecurity ACL data structures */
60819+
60820+struct acl_subject_label {
60821+ char *filename;
60822+ ino_t inode;
60823+ dev_t device;
60824+ __u32 mode;
60825+ kernel_cap_t cap_mask;
60826+ kernel_cap_t cap_lower;
60827+ kernel_cap_t cap_invert_audit;
60828+
60829+ struct rlimit res[GR_NLIMITS];
60830+ __u32 resmask;
60831+
60832+ __u8 user_trans_type;
60833+ __u8 group_trans_type;
60834+ uid_t *user_transitions;
60835+ gid_t *group_transitions;
60836+ __u16 user_trans_num;
60837+ __u16 group_trans_num;
60838+
60839+ __u32 sock_families[2];
60840+ __u32 ip_proto[8];
60841+ __u32 ip_type;
60842+ struct acl_ip_label **ips;
60843+ __u32 ip_num;
60844+ __u32 inaddr_any_override;
60845+
60846+ __u32 crashes;
60847+ unsigned long expires;
60848+
60849+ struct acl_subject_label *parent_subject;
60850+ struct gr_hash_struct *hash;
60851+ struct acl_subject_label *prev;
60852+ struct acl_subject_label *next;
60853+
60854+ struct acl_object_label **obj_hash;
60855+ __u32 obj_hash_size;
60856+ __u16 pax_flags;
60857+};
60858+
60859+struct role_allowed_ip {
60860+ __u32 addr;
60861+ __u32 netmask;
60862+
60863+ struct role_allowed_ip *prev;
60864+ struct role_allowed_ip *next;
60865+};
60866+
60867+struct role_transition {
60868+ char *rolename;
60869+
60870+ struct role_transition *prev;
60871+ struct role_transition *next;
60872+};
60873+
60874+struct acl_role_label {
60875+ char *rolename;
60876+ uid_t uidgid;
60877+ __u16 roletype;
60878+
60879+ __u16 auth_attempts;
60880+ unsigned long expires;
60881+
60882+ struct acl_subject_label *root_label;
60883+ struct gr_hash_struct *hash;
60884+
60885+ struct acl_role_label *prev;
60886+ struct acl_role_label *next;
60887+
60888+ struct role_transition *transitions;
60889+ struct role_allowed_ip *allowed_ips;
60890+ uid_t *domain_children;
60891+ __u16 domain_child_num;
60892+
60893+ umode_t umask;
60894+
60895+ struct acl_subject_label **subj_hash;
60896+ __u32 subj_hash_size;
60897+};
60898+
60899+struct user_acl_role_db {
60900+ struct acl_role_label **r_table;
60901+ __u32 num_pointers; /* Number of allocations to track */
60902+ __u32 num_roles; /* Number of roles */
60903+ __u32 num_domain_children; /* Number of domain children */
60904+ __u32 num_subjects; /* Number of subjects */
60905+ __u32 num_objects; /* Number of objects */
60906+};
60907+
60908+struct acl_object_label {
60909+ char *filename;
60910+ ino_t inode;
60911+ dev_t device;
60912+ __u32 mode;
60913+
60914+ struct acl_subject_label *nested;
60915+ struct acl_object_label *globbed;
60916+
60917+ /* next two structures not used */
60918+
60919+ struct acl_object_label *prev;
60920+ struct acl_object_label *next;
60921+};
60922+
60923+struct acl_ip_label {
60924+ char *iface;
60925+ __u32 addr;
60926+ __u32 netmask;
60927+ __u16 low, high;
60928+ __u8 mode;
60929+ __u32 type;
60930+ __u32 proto[8];
60931+
60932+ /* next two structures not used */
60933+
60934+ struct acl_ip_label *prev;
60935+ struct acl_ip_label *next;
60936+};
60937+
60938+struct gr_arg {
60939+ struct user_acl_role_db role_db;
60940+ unsigned char pw[GR_PW_LEN];
60941+ unsigned char salt[GR_SALT_LEN];
60942+ unsigned char sum[GR_SHA_LEN];
60943+ unsigned char sp_role[GR_SPROLE_LEN];
60944+ struct sprole_pw *sprole_pws;
60945+ dev_t segv_device;
60946+ ino_t segv_inode;
60947+ uid_t segv_uid;
60948+ __u16 num_sprole_pws;
60949+ __u16 mode;
60950+};
60951+
60952+struct gr_arg_wrapper {
60953+ struct gr_arg *arg;
60954+ __u32 version;
60955+ __u32 size;
60956+};
60957+
60958+struct subject_map {
60959+ struct acl_subject_label *user;
60960+ struct acl_subject_label *kernel;
60961+ struct subject_map *prev;
60962+ struct subject_map *next;
60963+};
60964+
60965+struct acl_subj_map_db {
60966+ struct subject_map **s_hash;
60967+ __u32 s_size;
60968+};
60969+
60970+/* End Data Structures Section */
60971+
60972+/* Hash functions generated by empirical testing by Brad Spengler
60973+ Makes good use of the low bits of the inode. Generally 0-1 times
60974+ in loop for successful match. 0-3 for unsuccessful match.
60975+ Shift/add algorithm with modulus of table size and an XOR*/
60976+
60977+static __inline__ unsigned int
60978+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60979+{
60980+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
60981+}
60982+
60983+ static __inline__ unsigned int
60984+shash(const struct acl_subject_label *userp, const unsigned int sz)
60985+{
60986+ return ((const unsigned long)userp % sz);
60987+}
60988+
60989+static __inline__ unsigned int
60990+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60991+{
60992+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60993+}
60994+
60995+static __inline__ unsigned int
60996+nhash(const char *name, const __u16 len, const unsigned int sz)
60997+{
60998+ return full_name_hash((const unsigned char *)name, len) % sz;
60999+}
61000+
61001+#define FOR_EACH_ROLE_START(role) \
61002+ role = role_list; \
61003+ while (role) {
61004+
61005+#define FOR_EACH_ROLE_END(role) \
61006+ role = role->prev; \
61007+ }
61008+
61009+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61010+ subj = NULL; \
61011+ iter = 0; \
61012+ while (iter < role->subj_hash_size) { \
61013+ if (subj == NULL) \
61014+ subj = role->subj_hash[iter]; \
61015+ if (subj == NULL) { \
61016+ iter++; \
61017+ continue; \
61018+ }
61019+
61020+#define FOR_EACH_SUBJECT_END(subj,iter) \
61021+ subj = subj->next; \
61022+ if (subj == NULL) \
61023+ iter++; \
61024+ }
61025+
61026+
61027+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61028+ subj = role->hash->first; \
61029+ while (subj != NULL) {
61030+
61031+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61032+ subj = subj->next; \
61033+ }
61034+
61035+#endif
61036+
61037diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61038new file mode 100644
61039index 0000000..323ecf2
61040--- /dev/null
61041+++ b/include/linux/gralloc.h
61042@@ -0,0 +1,9 @@
61043+#ifndef __GRALLOC_H
61044+#define __GRALLOC_H
61045+
61046+void acl_free_all(void);
61047+int acl_alloc_stack_init(unsigned long size);
61048+void *acl_alloc(unsigned long len);
61049+void *acl_alloc_num(unsigned long num, unsigned long len);
61050+
61051+#endif
61052diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61053new file mode 100644
61054index 0000000..b30e9bc
61055--- /dev/null
61056+++ b/include/linux/grdefs.h
61057@@ -0,0 +1,140 @@
61058+#ifndef GRDEFS_H
61059+#define GRDEFS_H
61060+
61061+/* Begin grsecurity status declarations */
61062+
61063+enum {
61064+ GR_READY = 0x01,
61065+ GR_STATUS_INIT = 0x00 // disabled state
61066+};
61067+
61068+/* Begin ACL declarations */
61069+
61070+/* Role flags */
61071+
61072+enum {
61073+ GR_ROLE_USER = 0x0001,
61074+ GR_ROLE_GROUP = 0x0002,
61075+ GR_ROLE_DEFAULT = 0x0004,
61076+ GR_ROLE_SPECIAL = 0x0008,
61077+ GR_ROLE_AUTH = 0x0010,
61078+ GR_ROLE_NOPW = 0x0020,
61079+ GR_ROLE_GOD = 0x0040,
61080+ GR_ROLE_LEARN = 0x0080,
61081+ GR_ROLE_TPE = 0x0100,
61082+ GR_ROLE_DOMAIN = 0x0200,
61083+ GR_ROLE_PAM = 0x0400,
61084+ GR_ROLE_PERSIST = 0x0800
61085+};
61086+
61087+/* ACL Subject and Object mode flags */
61088+enum {
61089+ GR_DELETED = 0x80000000
61090+};
61091+
61092+/* ACL Object-only mode flags */
61093+enum {
61094+ GR_READ = 0x00000001,
61095+ GR_APPEND = 0x00000002,
61096+ GR_WRITE = 0x00000004,
61097+ GR_EXEC = 0x00000008,
61098+ GR_FIND = 0x00000010,
61099+ GR_INHERIT = 0x00000020,
61100+ GR_SETID = 0x00000040,
61101+ GR_CREATE = 0x00000080,
61102+ GR_DELETE = 0x00000100,
61103+ GR_LINK = 0x00000200,
61104+ GR_AUDIT_READ = 0x00000400,
61105+ GR_AUDIT_APPEND = 0x00000800,
61106+ GR_AUDIT_WRITE = 0x00001000,
61107+ GR_AUDIT_EXEC = 0x00002000,
61108+ GR_AUDIT_FIND = 0x00004000,
61109+ GR_AUDIT_INHERIT= 0x00008000,
61110+ GR_AUDIT_SETID = 0x00010000,
61111+ GR_AUDIT_CREATE = 0x00020000,
61112+ GR_AUDIT_DELETE = 0x00040000,
61113+ GR_AUDIT_LINK = 0x00080000,
61114+ GR_PTRACERD = 0x00100000,
61115+ GR_NOPTRACE = 0x00200000,
61116+ GR_SUPPRESS = 0x00400000,
61117+ GR_NOLEARN = 0x00800000,
61118+ GR_INIT_TRANSFER= 0x01000000
61119+};
61120+
61121+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61122+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61123+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61124+
61125+/* ACL subject-only mode flags */
61126+enum {
61127+ GR_KILL = 0x00000001,
61128+ GR_VIEW = 0x00000002,
61129+ GR_PROTECTED = 0x00000004,
61130+ GR_LEARN = 0x00000008,
61131+ GR_OVERRIDE = 0x00000010,
61132+ /* just a placeholder, this mode is only used in userspace */
61133+ GR_DUMMY = 0x00000020,
61134+ GR_PROTSHM = 0x00000040,
61135+ GR_KILLPROC = 0x00000080,
61136+ GR_KILLIPPROC = 0x00000100,
61137+ /* just a placeholder, this mode is only used in userspace */
61138+ GR_NOTROJAN = 0x00000200,
61139+ GR_PROTPROCFD = 0x00000400,
61140+ GR_PROCACCT = 0x00000800,
61141+ GR_RELAXPTRACE = 0x00001000,
61142+ GR_NESTED = 0x00002000,
61143+ GR_INHERITLEARN = 0x00004000,
61144+ GR_PROCFIND = 0x00008000,
61145+ GR_POVERRIDE = 0x00010000,
61146+ GR_KERNELAUTH = 0x00020000,
61147+ GR_ATSECURE = 0x00040000,
61148+ GR_SHMEXEC = 0x00080000
61149+};
61150+
61151+enum {
61152+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61153+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61154+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61155+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61156+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61157+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61158+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61159+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61160+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61161+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61162+};
61163+
61164+enum {
61165+ GR_ID_USER = 0x01,
61166+ GR_ID_GROUP = 0x02,
61167+};
61168+
61169+enum {
61170+ GR_ID_ALLOW = 0x01,
61171+ GR_ID_DENY = 0x02,
61172+};
61173+
61174+#define GR_CRASH_RES 31
61175+#define GR_UIDTABLE_MAX 500
61176+
61177+/* begin resource learning section */
61178+enum {
61179+ GR_RLIM_CPU_BUMP = 60,
61180+ GR_RLIM_FSIZE_BUMP = 50000,
61181+ GR_RLIM_DATA_BUMP = 10000,
61182+ GR_RLIM_STACK_BUMP = 1000,
61183+ GR_RLIM_CORE_BUMP = 10000,
61184+ GR_RLIM_RSS_BUMP = 500000,
61185+ GR_RLIM_NPROC_BUMP = 1,
61186+ GR_RLIM_NOFILE_BUMP = 5,
61187+ GR_RLIM_MEMLOCK_BUMP = 50000,
61188+ GR_RLIM_AS_BUMP = 500000,
61189+ GR_RLIM_LOCKS_BUMP = 2,
61190+ GR_RLIM_SIGPENDING_BUMP = 5,
61191+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61192+ GR_RLIM_NICE_BUMP = 1,
61193+ GR_RLIM_RTPRIO_BUMP = 1,
61194+ GR_RLIM_RTTIME_BUMP = 1000000
61195+};
61196+
61197+#endif
61198diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61199new file mode 100644
61200index 0000000..c9292f7
61201--- /dev/null
61202+++ b/include/linux/grinternal.h
61203@@ -0,0 +1,223 @@
61204+#ifndef __GRINTERNAL_H
61205+#define __GRINTERNAL_H
61206+
61207+#ifdef CONFIG_GRKERNSEC
61208+
61209+#include <linux/fs.h>
61210+#include <linux/mnt_namespace.h>
61211+#include <linux/nsproxy.h>
61212+#include <linux/gracl.h>
61213+#include <linux/grdefs.h>
61214+#include <linux/grmsg.h>
61215+
61216+void gr_add_learn_entry(const char *fmt, ...)
61217+ __attribute__ ((format (printf, 1, 2)));
61218+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61219+ const struct vfsmount *mnt);
61220+__u32 gr_check_create(const struct dentry *new_dentry,
61221+ const struct dentry *parent,
61222+ const struct vfsmount *mnt, const __u32 mode);
61223+int gr_check_protected_task(const struct task_struct *task);
61224+__u32 to_gr_audit(const __u32 reqmode);
61225+int gr_set_acls(const int type);
61226+int gr_apply_subject_to_task(struct task_struct *task);
61227+int gr_acl_is_enabled(void);
61228+char gr_roletype_to_char(void);
61229+
61230+void gr_handle_alertkill(struct task_struct *task);
61231+char *gr_to_filename(const struct dentry *dentry,
61232+ const struct vfsmount *mnt);
61233+char *gr_to_filename1(const struct dentry *dentry,
61234+ const struct vfsmount *mnt);
61235+char *gr_to_filename2(const struct dentry *dentry,
61236+ const struct vfsmount *mnt);
61237+char *gr_to_filename3(const struct dentry *dentry,
61238+ const struct vfsmount *mnt);
61239+
61240+extern int grsec_enable_ptrace_readexec;
61241+extern int grsec_enable_harden_ptrace;
61242+extern int grsec_enable_link;
61243+extern int grsec_enable_fifo;
61244+extern int grsec_enable_execve;
61245+extern int grsec_enable_shm;
61246+extern int grsec_enable_execlog;
61247+extern int grsec_enable_signal;
61248+extern int grsec_enable_audit_ptrace;
61249+extern int grsec_enable_forkfail;
61250+extern int grsec_enable_time;
61251+extern int grsec_enable_rofs;
61252+extern int grsec_enable_chroot_shmat;
61253+extern int grsec_enable_chroot_mount;
61254+extern int grsec_enable_chroot_double;
61255+extern int grsec_enable_chroot_pivot;
61256+extern int grsec_enable_chroot_chdir;
61257+extern int grsec_enable_chroot_chmod;
61258+extern int grsec_enable_chroot_mknod;
61259+extern int grsec_enable_chroot_fchdir;
61260+extern int grsec_enable_chroot_nice;
61261+extern int grsec_enable_chroot_execlog;
61262+extern int grsec_enable_chroot_caps;
61263+extern int grsec_enable_chroot_sysctl;
61264+extern int grsec_enable_chroot_unix;
61265+extern int grsec_enable_symlinkown;
61266+extern int grsec_symlinkown_gid;
61267+extern int grsec_enable_tpe;
61268+extern int grsec_tpe_gid;
61269+extern int grsec_enable_tpe_all;
61270+extern int grsec_enable_tpe_invert;
61271+extern int grsec_enable_socket_all;
61272+extern int grsec_socket_all_gid;
61273+extern int grsec_enable_socket_client;
61274+extern int grsec_socket_client_gid;
61275+extern int grsec_enable_socket_server;
61276+extern int grsec_socket_server_gid;
61277+extern int grsec_audit_gid;
61278+extern int grsec_enable_group;
61279+extern int grsec_enable_audit_textrel;
61280+extern int grsec_enable_log_rwxmaps;
61281+extern int grsec_enable_mount;
61282+extern int grsec_enable_chdir;
61283+extern int grsec_resource_logging;
61284+extern int grsec_enable_blackhole;
61285+extern int grsec_lastack_retries;
61286+extern int grsec_enable_brute;
61287+extern int grsec_lock;
61288+
61289+extern spinlock_t grsec_alert_lock;
61290+extern unsigned long grsec_alert_wtime;
61291+extern unsigned long grsec_alert_fyet;
61292+
61293+extern spinlock_t grsec_audit_lock;
61294+
61295+extern rwlock_t grsec_exec_file_lock;
61296+
61297+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61298+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61299+ (tsk)->exec_file->f_vfsmnt) : "/")
61300+
61301+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61302+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61303+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61304+
61305+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61306+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
61307+ (tsk)->exec_file->f_vfsmnt) : "/")
61308+
61309+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61310+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61311+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61312+
61313+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61314+
61315+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61316+
61317+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61318+ (task)->pid, (cred)->uid, \
61319+ (cred)->euid, (cred)->gid, (cred)->egid, \
61320+ gr_parent_task_fullpath(task), \
61321+ (task)->real_parent->comm, (task)->real_parent->pid, \
61322+ (pcred)->uid, (pcred)->euid, \
61323+ (pcred)->gid, (pcred)->egid
61324+
61325+#define GR_CHROOT_CAPS {{ \
61326+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61327+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61328+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61329+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61330+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61331+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61332+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61333+
61334+#define security_learn(normal_msg,args...) \
61335+({ \
61336+ read_lock(&grsec_exec_file_lock); \
61337+ gr_add_learn_entry(normal_msg "\n", ## args); \
61338+ read_unlock(&grsec_exec_file_lock); \
61339+})
61340+
61341+enum {
61342+ GR_DO_AUDIT,
61343+ GR_DONT_AUDIT,
61344+ /* used for non-audit messages that we shouldn't kill the task on */
61345+ GR_DONT_AUDIT_GOOD
61346+};
61347+
61348+enum {
61349+ GR_TTYSNIFF,
61350+ GR_RBAC,
61351+ GR_RBAC_STR,
61352+ GR_STR_RBAC,
61353+ GR_RBAC_MODE2,
61354+ GR_RBAC_MODE3,
61355+ GR_FILENAME,
61356+ GR_SYSCTL_HIDDEN,
61357+ GR_NOARGS,
61358+ GR_ONE_INT,
61359+ GR_ONE_INT_TWO_STR,
61360+ GR_ONE_STR,
61361+ GR_STR_INT,
61362+ GR_TWO_STR_INT,
61363+ GR_TWO_INT,
61364+ GR_TWO_U64,
61365+ GR_THREE_INT,
61366+ GR_FIVE_INT_TWO_STR,
61367+ GR_TWO_STR,
61368+ GR_THREE_STR,
61369+ GR_FOUR_STR,
61370+ GR_STR_FILENAME,
61371+ GR_FILENAME_STR,
61372+ GR_FILENAME_TWO_INT,
61373+ GR_FILENAME_TWO_INT_STR,
61374+ GR_TEXTREL,
61375+ GR_PTRACE,
61376+ GR_RESOURCE,
61377+ GR_CAP,
61378+ GR_SIG,
61379+ GR_SIG2,
61380+ GR_CRASH1,
61381+ GR_CRASH2,
61382+ GR_PSACCT,
61383+ GR_RWXMAP
61384+};
61385+
61386+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61387+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61388+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61389+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61390+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61391+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61392+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61393+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61394+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61395+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61396+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61397+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61398+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61399+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61400+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61401+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61402+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61403+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61404+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61405+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61406+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61407+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61408+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61409+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61410+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61411+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61412+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61413+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61414+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61415+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61416+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61417+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61418+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61419+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61420+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61421+
61422+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61423+
61424+#endif
61425+
61426+#endif
61427diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61428new file mode 100644
61429index 0000000..54f4e85
61430--- /dev/null
61431+++ b/include/linux/grmsg.h
61432@@ -0,0 +1,110 @@
61433+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61434+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61435+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61436+#define GR_STOPMOD_MSG "denied modification of module state by "
61437+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61438+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61439+#define GR_IOPERM_MSG "denied use of ioperm() by "
61440+#define GR_IOPL_MSG "denied use of iopl() by "
61441+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61442+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61443+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61444+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61445+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61446+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61447+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61448+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61449+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61450+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61451+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61452+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61453+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61454+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61455+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61456+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61457+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61458+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61459+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61460+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61461+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61462+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61463+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61464+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61465+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61466+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61467+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61468+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61469+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61470+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61471+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61472+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61473+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61474+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61475+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61476+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61477+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61478+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61479+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61480+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61481+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61482+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61483+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61484+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61485+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61486+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61487+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61488+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61489+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61490+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61491+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61492+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61493+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61494+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61495+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61496+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61497+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61498+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61499+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61500+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61501+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61502+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61503+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61504+#define GR_FAILFORK_MSG "failed fork with errno %s by "
61505+#define GR_NICE_CHROOT_MSG "denied priority change by "
61506+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61507+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61508+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61509+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61510+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61511+#define GR_TIME_MSG "time set by "
61512+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61513+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61514+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61515+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61516+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61517+#define GR_BIND_MSG "denied bind() by "
61518+#define GR_CONNECT_MSG "denied connect() by "
61519+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61520+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61521+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61522+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61523+#define GR_CAP_ACL_MSG "use of %s denied for "
61524+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61525+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61526+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61527+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61528+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61529+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61530+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61531+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61532+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61533+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61534+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61535+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61536+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61537+#define GR_VM86_MSG "denied use of vm86 by "
61538+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61539+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61540+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61541+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61542+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
61543diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61544new file mode 100644
61545index 0000000..38bfb04
61546--- /dev/null
61547+++ b/include/linux/grsecurity.h
61548@@ -0,0 +1,233 @@
61549+#ifndef GR_SECURITY_H
61550+#define GR_SECURITY_H
61551+#include <linux/fs.h>
61552+#include <linux/fs_struct.h>
61553+#include <linux/binfmts.h>
61554+#include <linux/gracl.h>
61555+
61556+/* notify of brain-dead configs */
61557+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61558+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61559+#endif
61560+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61561+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61562+#endif
61563+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61564+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61565+#endif
61566+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61567+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61568+#endif
61569+
61570+#include <linux/compat.h>
61571+
61572+struct user_arg_ptr {
61573+#ifdef CONFIG_COMPAT
61574+ bool is_compat;
61575+#endif
61576+ union {
61577+ const char __user *const __user *native;
61578+#ifdef CONFIG_COMPAT
61579+ compat_uptr_t __user *compat;
61580+#endif
61581+ } ptr;
61582+};
61583+
61584+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61585+void gr_handle_brute_check(void);
61586+void gr_handle_kernel_exploit(void);
61587+int gr_process_user_ban(void);
61588+
61589+char gr_roletype_to_char(void);
61590+
61591+int gr_acl_enable_at_secure(void);
61592+
61593+int gr_check_user_change(int real, int effective, int fs);
61594+int gr_check_group_change(int real, int effective, int fs);
61595+
61596+void gr_del_task_from_ip_table(struct task_struct *p);
61597+
61598+int gr_pid_is_chrooted(struct task_struct *p);
61599+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61600+int gr_handle_chroot_nice(void);
61601+int gr_handle_chroot_sysctl(const int op);
61602+int gr_handle_chroot_setpriority(struct task_struct *p,
61603+ const int niceval);
61604+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61605+int gr_handle_chroot_chroot(const struct dentry *dentry,
61606+ const struct vfsmount *mnt);
61607+void gr_handle_chroot_chdir(struct path *path);
61608+int gr_handle_chroot_chmod(const struct dentry *dentry,
61609+ const struct vfsmount *mnt, const int mode);
61610+int gr_handle_chroot_mknod(const struct dentry *dentry,
61611+ const struct vfsmount *mnt, const int mode);
61612+int gr_handle_chroot_mount(const struct dentry *dentry,
61613+ const struct vfsmount *mnt,
61614+ const char *dev_name);
61615+int gr_handle_chroot_pivot(void);
61616+int gr_handle_chroot_unix(const pid_t pid);
61617+
61618+int gr_handle_rawio(const struct inode *inode);
61619+
61620+void gr_handle_ioperm(void);
61621+void gr_handle_iopl(void);
61622+
61623+umode_t gr_acl_umask(void);
61624+
61625+int gr_tpe_allow(const struct file *file);
61626+
61627+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61628+void gr_clear_chroot_entries(struct task_struct *task);
61629+
61630+void gr_log_forkfail(const int retval);
61631+void gr_log_timechange(void);
61632+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61633+void gr_log_chdir(const struct dentry *dentry,
61634+ const struct vfsmount *mnt);
61635+void gr_log_chroot_exec(const struct dentry *dentry,
61636+ const struct vfsmount *mnt);
61637+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61638+void gr_log_remount(const char *devname, const int retval);
61639+void gr_log_unmount(const char *devname, const int retval);
61640+void gr_log_mount(const char *from, const char *to, const int retval);
61641+void gr_log_textrel(struct vm_area_struct *vma);
61642+void gr_log_rwxmmap(struct file *file);
61643+void gr_log_rwxmprotect(struct file *file);
61644+
61645+int gr_handle_follow_link(const struct inode *parent,
61646+ const struct inode *inode,
61647+ const struct dentry *dentry,
61648+ const struct vfsmount *mnt);
61649+int gr_handle_fifo(const struct dentry *dentry,
61650+ const struct vfsmount *mnt,
61651+ const struct dentry *dir, const int flag,
61652+ const int acc_mode);
61653+int gr_handle_hardlink(const struct dentry *dentry,
61654+ const struct vfsmount *mnt,
61655+ struct inode *inode,
61656+ const int mode, const char *to);
61657+
61658+int gr_is_capable(const int cap);
61659+int gr_is_capable_nolog(const int cap);
61660+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61661+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61662+
61663+void gr_learn_resource(const struct task_struct *task, const int limit,
61664+ const unsigned long wanted, const int gt);
61665+void gr_copy_label(struct task_struct *tsk);
61666+void gr_handle_crash(struct task_struct *task, const int sig);
61667+int gr_handle_signal(const struct task_struct *p, const int sig);
61668+int gr_check_crash_uid(const uid_t uid);
61669+int gr_check_protected_task(const struct task_struct *task);
61670+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61671+int gr_acl_handle_mmap(const struct file *file,
61672+ const unsigned long prot);
61673+int gr_acl_handle_mprotect(const struct file *file,
61674+ const unsigned long prot);
61675+int gr_check_hidden_task(const struct task_struct *tsk);
61676+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61677+ const struct vfsmount *mnt);
61678+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61679+ const struct vfsmount *mnt);
61680+__u32 gr_acl_handle_access(const struct dentry *dentry,
61681+ const struct vfsmount *mnt, const int fmode);
61682+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61683+ const struct vfsmount *mnt, umode_t *mode);
61684+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61685+ const struct vfsmount *mnt);
61686+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61687+ const struct vfsmount *mnt);
61688+int gr_handle_ptrace(struct task_struct *task, const long request);
61689+int gr_handle_proc_ptrace(struct task_struct *task);
61690+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61691+ const struct vfsmount *mnt);
61692+int gr_check_crash_exec(const struct file *filp);
61693+int gr_acl_is_enabled(void);
61694+void gr_set_kernel_label(struct task_struct *task);
61695+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61696+ const gid_t gid);
61697+int gr_set_proc_label(const struct dentry *dentry,
61698+ const struct vfsmount *mnt,
61699+ const int unsafe_flags);
61700+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61701+ const struct vfsmount *mnt);
61702+__u32 gr_acl_handle_open(const struct dentry *dentry,
61703+ const struct vfsmount *mnt, int acc_mode);
61704+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61705+ const struct dentry *p_dentry,
61706+ const struct vfsmount *p_mnt,
61707+ int open_flags, int acc_mode, const int imode);
61708+void gr_handle_create(const struct dentry *dentry,
61709+ const struct vfsmount *mnt);
61710+void gr_handle_proc_create(const struct dentry *dentry,
61711+ const struct inode *inode);
61712+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61713+ const struct dentry *parent_dentry,
61714+ const struct vfsmount *parent_mnt,
61715+ const int mode);
61716+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61717+ const struct dentry *parent_dentry,
61718+ const struct vfsmount *parent_mnt);
61719+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61720+ const struct vfsmount *mnt);
61721+void gr_handle_delete(const ino_t ino, const dev_t dev);
61722+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61723+ const struct vfsmount *mnt);
61724+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61725+ const struct dentry *parent_dentry,
61726+ const struct vfsmount *parent_mnt,
61727+ const char *from);
61728+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61729+ const struct dentry *parent_dentry,
61730+ const struct vfsmount *parent_mnt,
61731+ const struct dentry *old_dentry,
61732+ const struct vfsmount *old_mnt, const char *to);
61733+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
61734+int gr_acl_handle_rename(struct dentry *new_dentry,
61735+ struct dentry *parent_dentry,
61736+ const struct vfsmount *parent_mnt,
61737+ struct dentry *old_dentry,
61738+ struct inode *old_parent_inode,
61739+ struct vfsmount *old_mnt, const char *newname);
61740+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61741+ struct dentry *old_dentry,
61742+ struct dentry *new_dentry,
61743+ struct vfsmount *mnt, const __u8 replace);
61744+__u32 gr_check_link(const struct dentry *new_dentry,
61745+ const struct dentry *parent_dentry,
61746+ const struct vfsmount *parent_mnt,
61747+ const struct dentry *old_dentry,
61748+ const struct vfsmount *old_mnt);
61749+int gr_acl_handle_filldir(const struct file *file, const char *name,
61750+ const unsigned int namelen, const ino_t ino);
61751+
61752+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61753+ const struct vfsmount *mnt);
61754+void gr_acl_handle_exit(void);
61755+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61756+int gr_acl_handle_procpidmem(const struct task_struct *task);
61757+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61758+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61759+void gr_audit_ptrace(struct task_struct *task);
61760+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61761+
61762+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61763+
61764+#ifdef CONFIG_GRKERNSEC
61765+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61766+void gr_handle_vm86(void);
61767+void gr_handle_mem_readwrite(u64 from, u64 to);
61768+
61769+void gr_log_badprocpid(const char *entry);
61770+
61771+extern int grsec_enable_dmesg;
61772+extern int grsec_disable_privio;
61773+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61774+extern int grsec_enable_chroot_findtask;
61775+#endif
61776+#ifdef CONFIG_GRKERNSEC_SETXID
61777+extern int grsec_enable_setxid;
61778+#endif
61779+#endif
61780+
61781+#endif
61782diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61783new file mode 100644
61784index 0000000..e7ffaaf
61785--- /dev/null
61786+++ b/include/linux/grsock.h
61787@@ -0,0 +1,19 @@
61788+#ifndef __GRSOCK_H
61789+#define __GRSOCK_H
61790+
61791+extern void gr_attach_curr_ip(const struct sock *sk);
61792+extern int gr_handle_sock_all(const int family, const int type,
61793+ const int protocol);
61794+extern int gr_handle_sock_server(const struct sockaddr *sck);
61795+extern int gr_handle_sock_server_other(const struct sock *sck);
61796+extern int gr_handle_sock_client(const struct sockaddr *sck);
61797+extern int gr_search_connect(struct socket * sock,
61798+ struct sockaddr_in * addr);
61799+extern int gr_search_bind(struct socket * sock,
61800+ struct sockaddr_in * addr);
61801+extern int gr_search_listen(struct socket * sock);
61802+extern int gr_search_accept(struct socket * sock);
61803+extern int gr_search_socket(const int domain, const int type,
61804+ const int protocol);
61805+
61806+#endif
61807diff --git a/include/linux/hid.h b/include/linux/hid.h
61808index 3a95da6..51986f1 100644
61809--- a/include/linux/hid.h
61810+++ b/include/linux/hid.h
61811@@ -696,7 +696,7 @@ struct hid_ll_driver {
61812 unsigned int code, int value);
61813
61814 int (*parse)(struct hid_device *hdev);
61815-};
61816+} __no_const;
61817
61818 #define PM_HINT_FULLON 1<<5
61819 #define PM_HINT_NORMAL 1<<1
61820diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61821index d3999b4..1304cb4 100644
61822--- a/include/linux/highmem.h
61823+++ b/include/linux/highmem.h
61824@@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61825 kunmap_atomic(kaddr);
61826 }
61827
61828+static inline void sanitize_highpage(struct page *page)
61829+{
61830+ void *kaddr;
61831+ unsigned long flags;
61832+
61833+ local_irq_save(flags);
61834+ kaddr = kmap_atomic(page);
61835+ clear_page(kaddr);
61836+ kunmap_atomic(kaddr);
61837+ local_irq_restore(flags);
61838+}
61839+
61840 static inline void zero_user_segments(struct page *page,
61841 unsigned start1, unsigned end1,
61842 unsigned start2, unsigned end2)
61843diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61844index 195d8b3..e20cfab 100644
61845--- a/include/linux/i2c.h
61846+++ b/include/linux/i2c.h
61847@@ -365,6 +365,7 @@ struct i2c_algorithm {
61848 /* To determine what the adapter supports */
61849 u32 (*functionality) (struct i2c_adapter *);
61850 };
61851+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61852
61853 /*
61854 * i2c_adapter is the structure used to identify a physical i2c bus along
61855diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61856index d23c3c2..eb63c81 100644
61857--- a/include/linux/i2o.h
61858+++ b/include/linux/i2o.h
61859@@ -565,7 +565,7 @@ struct i2o_controller {
61860 struct i2o_device *exec; /* Executive */
61861 #if BITS_PER_LONG == 64
61862 spinlock_t context_list_lock; /* lock for context_list */
61863- atomic_t context_list_counter; /* needed for unique contexts */
61864+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61865 struct list_head context_list; /* list of context id's
61866 and pointers */
61867 #endif
61868diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61869index 58404b0..439ed95 100644
61870--- a/include/linux/if_team.h
61871+++ b/include/linux/if_team.h
61872@@ -64,6 +64,7 @@ struct team_mode_ops {
61873 void (*port_leave)(struct team *team, struct team_port *port);
61874 void (*port_change_mac)(struct team *team, struct team_port *port);
61875 };
61876+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61877
61878 enum team_option_type {
61879 TEAM_OPTION_TYPE_U32,
61880@@ -112,7 +113,7 @@ struct team {
61881 struct list_head option_list;
61882
61883 const struct team_mode *mode;
61884- struct team_mode_ops ops;
61885+ team_mode_ops_no_const ops;
61886 long mode_priv[TEAM_MODE_PRIV_LONGS];
61887 };
61888
61889diff --git a/include/linux/init.h b/include/linux/init.h
61890index 6b95109..4aca62c 100644
61891--- a/include/linux/init.h
61892+++ b/include/linux/init.h
61893@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61894
61895 /* Each module must use one module_init(). */
61896 #define module_init(initfn) \
61897- static inline initcall_t __inittest(void) \
61898+ static inline __used initcall_t __inittest(void) \
61899 { return initfn; } \
61900 int init_module(void) __attribute__((alias(#initfn)));
61901
61902 /* This is only required if you want to be unloadable. */
61903 #define module_exit(exitfn) \
61904- static inline exitcall_t __exittest(void) \
61905+ static inline __used exitcall_t __exittest(void) \
61906 { return exitfn; } \
61907 void cleanup_module(void) __attribute__((alias(#exitfn)));
61908
61909diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61910index e4baff5..83bb175 100644
61911--- a/include/linux/init_task.h
61912+++ b/include/linux/init_task.h
61913@@ -134,6 +134,12 @@ extern struct cred init_cred;
61914
61915 #define INIT_TASK_COMM "swapper"
61916
61917+#ifdef CONFIG_X86
61918+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61919+#else
61920+#define INIT_TASK_THREAD_INFO
61921+#endif
61922+
61923 /*
61924 * INIT_TASK is used to set up the first task table, touch at
61925 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61926@@ -172,6 +178,7 @@ extern struct cred init_cred;
61927 RCU_INIT_POINTER(.cred, &init_cred), \
61928 .comm = INIT_TASK_COMM, \
61929 .thread = INIT_THREAD, \
61930+ INIT_TASK_THREAD_INFO \
61931 .fs = &init_fs, \
61932 .files = &init_files, \
61933 .signal = &init_signals, \
61934diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61935index e6ca56d..8583707 100644
61936--- a/include/linux/intel-iommu.h
61937+++ b/include/linux/intel-iommu.h
61938@@ -296,7 +296,7 @@ struct iommu_flush {
61939 u8 fm, u64 type);
61940 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61941 unsigned int size_order, u64 type);
61942-};
61943+} __no_const;
61944
61945 enum {
61946 SR_DMAR_FECTL_REG,
61947diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61948index 2aea5d2..0b82f0c 100644
61949--- a/include/linux/interrupt.h
61950+++ b/include/linux/interrupt.h
61951@@ -439,7 +439,7 @@ enum
61952 /* map softirq index to softirq name. update 'softirq_to_name' in
61953 * kernel/softirq.c when adding a new softirq.
61954 */
61955-extern char *softirq_to_name[NR_SOFTIRQS];
61956+extern const char * const softirq_to_name[NR_SOFTIRQS];
61957
61958 /* softirq mask and active fields moved to irq_cpustat_t in
61959 * asm/hardirq.h to get better cache usage. KAO
61960@@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61961
61962 struct softirq_action
61963 {
61964- void (*action)(struct softirq_action *);
61965+ void (*action)(void);
61966 };
61967
61968 asmlinkage void do_softirq(void);
61969 asmlinkage void __do_softirq(void);
61970-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61971+extern void open_softirq(int nr, void (*action)(void));
61972 extern void softirq_init(void);
61973 extern void __raise_softirq_irqoff(unsigned int nr);
61974
61975diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61976index 3875719..4cd454c 100644
61977--- a/include/linux/kallsyms.h
61978+++ b/include/linux/kallsyms.h
61979@@ -15,7 +15,8 @@
61980
61981 struct module;
61982
61983-#ifdef CONFIG_KALLSYMS
61984+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61985+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61986 /* Lookup the address for a symbol. Returns 0 if not found. */
61987 unsigned long kallsyms_lookup_name(const char *name);
61988
61989@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61990 /* Stupid that this does nothing, but I didn't create this mess. */
61991 #define __print_symbol(fmt, addr)
61992 #endif /*CONFIG_KALLSYMS*/
61993+#else /* when included by kallsyms.c, vsnprintf.c, or
61994+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61995+extern void __print_symbol(const char *fmt, unsigned long address);
61996+extern int sprint_backtrace(char *buffer, unsigned long address);
61997+extern int sprint_symbol(char *buffer, unsigned long address);
61998+const char *kallsyms_lookup(unsigned long addr,
61999+ unsigned long *symbolsize,
62000+ unsigned long *offset,
62001+ char **modname, char *namebuf);
62002+#endif
62003
62004 /* This macro allows us to keep printk typechecking */
62005 static __printf(1, 2)
62006diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62007index c4d2fc1..5df9c19 100644
62008--- a/include/linux/kgdb.h
62009+++ b/include/linux/kgdb.h
62010@@ -53,7 +53,7 @@ extern int kgdb_connected;
62011 extern int kgdb_io_module_registered;
62012
62013 extern atomic_t kgdb_setting_breakpoint;
62014-extern atomic_t kgdb_cpu_doing_single_step;
62015+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62016
62017 extern struct task_struct *kgdb_usethread;
62018 extern struct task_struct *kgdb_contthread;
62019@@ -252,7 +252,7 @@ struct kgdb_arch {
62020 void (*disable_hw_break)(struct pt_regs *regs);
62021 void (*remove_all_hw_break)(void);
62022 void (*correct_hw_break)(void);
62023-};
62024+} __do_const;
62025
62026 /**
62027 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62028@@ -277,7 +277,7 @@ struct kgdb_io {
62029 void (*pre_exception) (void);
62030 void (*post_exception) (void);
62031 int is_console;
62032-};
62033+} __do_const;
62034
62035 extern struct kgdb_arch arch_kgdb_ops;
62036
62037diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62038index dd99c32..da06047 100644
62039--- a/include/linux/kmod.h
62040+++ b/include/linux/kmod.h
62041@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62042 * usually useless though. */
62043 extern __printf(2, 3)
62044 int __request_module(bool wait, const char *name, ...);
62045+extern __printf(3, 4)
62046+int ___request_module(bool wait, char *param_name, const char *name, ...);
62047 #define request_module(mod...) __request_module(true, mod)
62048 #define request_module_nowait(mod...) __request_module(false, mod)
62049 #define try_then_request_module(x, mod...) \
62050diff --git a/include/linux/kref.h b/include/linux/kref.h
62051index 9c07dce..a92fa71 100644
62052--- a/include/linux/kref.h
62053+++ b/include/linux/kref.h
62054@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62055 static inline int kref_sub(struct kref *kref, unsigned int count,
62056 void (*release)(struct kref *kref))
62057 {
62058- WARN_ON(release == NULL);
62059+ BUG_ON(release == NULL);
62060
62061 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62062 release(kref);
62063diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62064index 72cbf08..dd0201d 100644
62065--- a/include/linux/kvm_host.h
62066+++ b/include/linux/kvm_host.h
62067@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62068 void vcpu_load(struct kvm_vcpu *vcpu);
62069 void vcpu_put(struct kvm_vcpu *vcpu);
62070
62071-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62072+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62073 struct module *module);
62074 void kvm_exit(void);
62075
62076@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62077 struct kvm_guest_debug *dbg);
62078 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62079
62080-int kvm_arch_init(void *opaque);
62081+int kvm_arch_init(const void *opaque);
62082 void kvm_arch_exit(void);
62083
62084 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62085diff --git a/include/linux/libata.h b/include/linux/libata.h
62086index 6e887c7..4539601 100644
62087--- a/include/linux/libata.h
62088+++ b/include/linux/libata.h
62089@@ -910,7 +910,7 @@ struct ata_port_operations {
62090 * fields must be pointers.
62091 */
62092 const struct ata_port_operations *inherits;
62093-};
62094+} __do_const;
62095
62096 struct ata_port_info {
62097 unsigned long flags;
62098diff --git a/include/linux/mca.h b/include/linux/mca.h
62099index 3797270..7765ede 100644
62100--- a/include/linux/mca.h
62101+++ b/include/linux/mca.h
62102@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62103 int region);
62104 void * (*mca_transform_memory)(struct mca_device *,
62105 void *memory);
62106-};
62107+} __no_const;
62108
62109 struct mca_bus {
62110 u64 default_dma_mask;
62111diff --git a/include/linux/memory.h b/include/linux/memory.h
62112index 1ac7f6e..a5794d0 100644
62113--- a/include/linux/memory.h
62114+++ b/include/linux/memory.h
62115@@ -143,7 +143,7 @@ struct memory_accessor {
62116 size_t count);
62117 ssize_t (*write)(struct memory_accessor *, const char *buf,
62118 off_t offset, size_t count);
62119-};
62120+} __no_const;
62121
62122 /*
62123 * Kernel text modification mutex, used for code patching. Users of this lock
62124diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62125index ee96cd5..7823c3a 100644
62126--- a/include/linux/mfd/abx500.h
62127+++ b/include/linux/mfd/abx500.h
62128@@ -455,6 +455,7 @@ struct abx500_ops {
62129 int (*event_registers_startup_state_get) (struct device *, u8 *);
62130 int (*startup_irq_enabled) (struct device *, unsigned int);
62131 };
62132+typedef struct abx500_ops __no_const abx500_ops_no_const;
62133
62134 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62135 void abx500_remove_ops(struct device *dev);
62136diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
62137index 9b07725..3d55001 100644
62138--- a/include/linux/mfd/abx500/ux500_chargalg.h
62139+++ b/include/linux/mfd/abx500/ux500_chargalg.h
62140@@ -19,7 +19,7 @@ struct ux500_charger_ops {
62141 int (*enable) (struct ux500_charger *, int, int, int);
62142 int (*kick_wd) (struct ux500_charger *);
62143 int (*update_curr) (struct ux500_charger *, int);
62144-};
62145+} __no_const;
62146
62147 /**
62148 * struct ux500_charger - power supply ux500 charger sub class
62149diff --git a/include/linux/mm.h b/include/linux/mm.h
62150index 74aa71b..4ae97ba 100644
62151--- a/include/linux/mm.h
62152+++ b/include/linux/mm.h
62153@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
62154
62155 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62156 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62157+
62158+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62159+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62160+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62161+#else
62162 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62163+#endif
62164+
62165 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62166 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62167
62168@@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
62169 int set_page_dirty_lock(struct page *page);
62170 int clear_page_dirty_for_io(struct page *page);
62171
62172-/* Is the vma a continuation of the stack vma above it? */
62173-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62174-{
62175- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62176-}
62177-
62178-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62179- unsigned long addr)
62180-{
62181- return (vma->vm_flags & VM_GROWSDOWN) &&
62182- (vma->vm_start == addr) &&
62183- !vma_growsdown(vma->vm_prev, addr);
62184-}
62185-
62186-/* Is the vma a continuation of the stack vma below it? */
62187-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62188-{
62189- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62190-}
62191-
62192-static inline int stack_guard_page_end(struct vm_area_struct *vma,
62193- unsigned long addr)
62194-{
62195- return (vma->vm_flags & VM_GROWSUP) &&
62196- (vma->vm_end == addr) &&
62197- !vma_growsup(vma->vm_next, addr);
62198-}
62199-
62200 extern pid_t
62201 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
62202
62203@@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
62204 }
62205 #endif
62206
62207+#ifdef CONFIG_MMU
62208+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62209+#else
62210+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62211+{
62212+ return __pgprot(0);
62213+}
62214+#endif
62215+
62216 int vma_wants_writenotify(struct vm_area_struct *vma);
62217
62218 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62219@@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
62220 {
62221 return 0;
62222 }
62223+
62224+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62225+ unsigned long address)
62226+{
62227+ return 0;
62228+}
62229 #else
62230 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62231+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62232 #endif
62233
62234 #ifdef __PAGETABLE_PMD_FOLDED
62235@@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
62236 {
62237 return 0;
62238 }
62239+
62240+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62241+ unsigned long address)
62242+{
62243+ return 0;
62244+}
62245 #else
62246 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62247+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62248 #endif
62249
62250 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
62251@@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
62252 NULL: pud_offset(pgd, address);
62253 }
62254
62255+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62256+{
62257+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62258+ NULL: pud_offset(pgd, address);
62259+}
62260+
62261 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62262 {
62263 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62264 NULL: pmd_offset(pud, address);
62265 }
62266+
62267+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62268+{
62269+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62270+ NULL: pmd_offset(pud, address);
62271+}
62272 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62273
62274 #if USE_SPLIT_PTLOCKS
62275@@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
62276 unsigned long, unsigned long,
62277 unsigned long, unsigned long);
62278 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62279+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62280
62281 /* These take the mm semaphore themselves */
62282 extern unsigned long vm_brk(unsigned long, unsigned long);
62283@@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62284 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62285 struct vm_area_struct **pprev);
62286
62287+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62288+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62289+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62290+
62291 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62292 NULL if none. Assume start_addr < end_addr. */
62293 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62294@@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62295 return vma;
62296 }
62297
62298-#ifdef CONFIG_MMU
62299-pgprot_t vm_get_page_prot(unsigned long vm_flags);
62300-#else
62301-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62302-{
62303- return __pgprot(0);
62304-}
62305-#endif
62306-
62307 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62308 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62309 unsigned long pfn, unsigned long size, pgprot_t);
62310@@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
62311 extern int sysctl_memory_failure_early_kill;
62312 extern int sysctl_memory_failure_recovery;
62313 extern void shake_page(struct page *p, int access);
62314-extern atomic_long_t mce_bad_pages;
62315+extern atomic_long_unchecked_t mce_bad_pages;
62316 extern int soft_offline_page(struct page *page, int flags);
62317
62318 extern void dump_page(struct page *page);
62319@@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62320 static inline bool page_is_guard(struct page *page) { return false; }
62321 #endif /* CONFIG_DEBUG_PAGEALLOC */
62322
62323+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62324+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62325+#else
62326+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62327+#endif
62328+
62329 #endif /* __KERNEL__ */
62330 #endif /* _LINUX_MM_H */
62331diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62332index b35752f..41075a0 100644
62333--- a/include/linux/mm_types.h
62334+++ b/include/linux/mm_types.h
62335@@ -262,6 +262,8 @@ struct vm_area_struct {
62336 #ifdef CONFIG_NUMA
62337 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62338 #endif
62339+
62340+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62341 };
62342
62343 struct core_thread {
62344@@ -336,7 +338,7 @@ struct mm_struct {
62345 unsigned long def_flags;
62346 unsigned long nr_ptes; /* Page table pages */
62347 unsigned long start_code, end_code, start_data, end_data;
62348- unsigned long start_brk, brk, start_stack;
62349+ unsigned long brk_gap, start_brk, brk, start_stack;
62350 unsigned long arg_start, arg_end, env_start, env_end;
62351
62352 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62353@@ -398,6 +400,24 @@ struct mm_struct {
62354 #ifdef CONFIG_CPUMASK_OFFSTACK
62355 struct cpumask cpumask_allocation;
62356 #endif
62357+
62358+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62359+ unsigned long pax_flags;
62360+#endif
62361+
62362+#ifdef CONFIG_PAX_DLRESOLVE
62363+ unsigned long call_dl_resolve;
62364+#endif
62365+
62366+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62367+ unsigned long call_syscall;
62368+#endif
62369+
62370+#ifdef CONFIG_PAX_ASLR
62371+ unsigned long delta_mmap; /* randomized offset */
62372+ unsigned long delta_stack; /* randomized offset */
62373+#endif
62374+
62375 };
62376
62377 static inline void mm_init_cpumask(struct mm_struct *mm)
62378diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62379index 1d1b1e1..2a13c78 100644
62380--- a/include/linux/mmu_notifier.h
62381+++ b/include/linux/mmu_notifier.h
62382@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62383 */
62384 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62385 ({ \
62386- pte_t __pte; \
62387+ pte_t ___pte; \
62388 struct vm_area_struct *___vma = __vma; \
62389 unsigned long ___address = __address; \
62390- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62391+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62392 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62393- __pte; \
62394+ ___pte; \
62395 })
62396
62397 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62398diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62399index 5f6806b..49db2b2 100644
62400--- a/include/linux/mmzone.h
62401+++ b/include/linux/mmzone.h
62402@@ -380,7 +380,7 @@ struct zone {
62403 unsigned long flags; /* zone flags, see below */
62404
62405 /* Zone statistics */
62406- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62407+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62408
62409 /*
62410 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62411diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62412index 501da4c..ba79bb4 100644
62413--- a/include/linux/mod_devicetable.h
62414+++ b/include/linux/mod_devicetable.h
62415@@ -12,7 +12,7 @@
62416 typedef unsigned long kernel_ulong_t;
62417 #endif
62418
62419-#define PCI_ANY_ID (~0)
62420+#define PCI_ANY_ID ((__u16)~0)
62421
62422 struct pci_device_id {
62423 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62424@@ -131,7 +131,7 @@ struct usb_device_id {
62425 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62426 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62427
62428-#define HID_ANY_ID (~0)
62429+#define HID_ANY_ID (~0U)
62430
62431 struct hid_device_id {
62432 __u16 bus;
62433diff --git a/include/linux/module.h b/include/linux/module.h
62434index fbcafe2..e5d9587 100644
62435--- a/include/linux/module.h
62436+++ b/include/linux/module.h
62437@@ -17,6 +17,7 @@
62438 #include <linux/moduleparam.h>
62439 #include <linux/tracepoint.h>
62440 #include <linux/export.h>
62441+#include <linux/fs.h>
62442
62443 #include <linux/percpu.h>
62444 #include <asm/module.h>
62445@@ -273,19 +274,16 @@ struct module
62446 int (*init)(void);
62447
62448 /* If this is non-NULL, vfree after init() returns */
62449- void *module_init;
62450+ void *module_init_rx, *module_init_rw;
62451
62452 /* Here is the actual code + data, vfree'd on unload. */
62453- void *module_core;
62454+ void *module_core_rx, *module_core_rw;
62455
62456 /* Here are the sizes of the init and core sections */
62457- unsigned int init_size, core_size;
62458+ unsigned int init_size_rw, core_size_rw;
62459
62460 /* The size of the executable code in each section. */
62461- unsigned int init_text_size, core_text_size;
62462-
62463- /* Size of RO sections of the module (text+rodata) */
62464- unsigned int init_ro_size, core_ro_size;
62465+ unsigned int init_size_rx, core_size_rx;
62466
62467 /* Arch-specific module values */
62468 struct mod_arch_specific arch;
62469@@ -341,6 +339,10 @@ struct module
62470 #ifdef CONFIG_EVENT_TRACING
62471 struct ftrace_event_call **trace_events;
62472 unsigned int num_trace_events;
62473+ struct file_operations trace_id;
62474+ struct file_operations trace_enable;
62475+ struct file_operations trace_format;
62476+ struct file_operations trace_filter;
62477 #endif
62478 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62479 unsigned int num_ftrace_callsites;
62480@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
62481 bool is_module_percpu_address(unsigned long addr);
62482 bool is_module_text_address(unsigned long addr);
62483
62484+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62485+{
62486+
62487+#ifdef CONFIG_PAX_KERNEXEC
62488+ if (ktla_ktva(addr) >= (unsigned long)start &&
62489+ ktla_ktva(addr) < (unsigned long)start + size)
62490+ return 1;
62491+#endif
62492+
62493+ return ((void *)addr >= start && (void *)addr < start + size);
62494+}
62495+
62496+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62497+{
62498+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62499+}
62500+
62501+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62502+{
62503+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62504+}
62505+
62506+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62507+{
62508+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62509+}
62510+
62511+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62512+{
62513+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62514+}
62515+
62516 static inline int within_module_core(unsigned long addr, struct module *mod)
62517 {
62518- return (unsigned long)mod->module_core <= addr &&
62519- addr < (unsigned long)mod->module_core + mod->core_size;
62520+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62521 }
62522
62523 static inline int within_module_init(unsigned long addr, struct module *mod)
62524 {
62525- return (unsigned long)mod->module_init <= addr &&
62526- addr < (unsigned long)mod->module_init + mod->init_size;
62527+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62528 }
62529
62530 /* Search for module by name: must hold module_mutex. */
62531diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62532index b2be02e..72d2f78 100644
62533--- a/include/linux/moduleloader.h
62534+++ b/include/linux/moduleloader.h
62535@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62536
62537 /* Allocator used for allocating struct module, core sections and init
62538 sections. Returns NULL on failure. */
62539-void *module_alloc(unsigned long size);
62540+void *module_alloc(unsigned long size) __size_overflow(1);
62541+
62542+#ifdef CONFIG_PAX_KERNEXEC
62543+void *module_alloc_exec(unsigned long size) __size_overflow(1);
62544+#else
62545+#define module_alloc_exec(x) module_alloc(x)
62546+#endif
62547
62548 /* Free memory returned from module_alloc. */
62549 void module_free(struct module *mod, void *module_region);
62550
62551+#ifdef CONFIG_PAX_KERNEXEC
62552+void module_free_exec(struct module *mod, void *module_region);
62553+#else
62554+#define module_free_exec(x, y) module_free((x), (y))
62555+#endif
62556+
62557 /* Apply the given relocation to the (simplified) ELF. Return -error
62558 or 0. */
62559 int apply_relocate(Elf_Shdr *sechdrs,
62560diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62561index 944bc18..042d291 100644
62562--- a/include/linux/moduleparam.h
62563+++ b/include/linux/moduleparam.h
62564@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
62565 * @len is usually just sizeof(string).
62566 */
62567 #define module_param_string(name, string, len, perm) \
62568- static const struct kparam_string __param_string_##name \
62569+ static const struct kparam_string __param_string_##name __used \
62570 = { len, string }; \
62571 __module_param_call(MODULE_PARAM_PREFIX, name, \
62572 &param_ops_string, \
62573@@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62574 */
62575 #define module_param_array_named(name, array, type, nump, perm) \
62576 param_check_##type(name, &(array)[0]); \
62577- static const struct kparam_array __param_arr_##name \
62578+ static const struct kparam_array __param_arr_##name __used \
62579 = { .max = ARRAY_SIZE(array), .num = nump, \
62580 .ops = &param_ops_##type, \
62581 .elemsize = sizeof(array[0]), .elem = array }; \
62582diff --git a/include/linux/namei.h b/include/linux/namei.h
62583index ffc0213..2c1f2cb 100644
62584--- a/include/linux/namei.h
62585+++ b/include/linux/namei.h
62586@@ -24,7 +24,7 @@ struct nameidata {
62587 unsigned seq;
62588 int last_type;
62589 unsigned depth;
62590- char *saved_names[MAX_NESTED_LINKS + 1];
62591+ const char *saved_names[MAX_NESTED_LINKS + 1];
62592
62593 /* Intent data */
62594 union {
62595@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62596 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62597 extern void unlock_rename(struct dentry *, struct dentry *);
62598
62599-static inline void nd_set_link(struct nameidata *nd, char *path)
62600+static inline void nd_set_link(struct nameidata *nd, const char *path)
62601 {
62602 nd->saved_names[nd->depth] = path;
62603 }
62604
62605-static inline char *nd_get_link(struct nameidata *nd)
62606+static inline const char *nd_get_link(const struct nameidata *nd)
62607 {
62608 return nd->saved_names[nd->depth];
62609 }
62610diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62611index 33900a5..2072000 100644
62612--- a/include/linux/netdevice.h
62613+++ b/include/linux/netdevice.h
62614@@ -1003,6 +1003,7 @@ struct net_device_ops {
62615 int (*ndo_neigh_construct)(struct neighbour *n);
62616 void (*ndo_neigh_destroy)(struct neighbour *n);
62617 };
62618+typedef struct net_device_ops __no_const net_device_ops_no_const;
62619
62620 /*
62621 * The DEVICE structure.
62622@@ -1064,7 +1065,7 @@ struct net_device {
62623 int iflink;
62624
62625 struct net_device_stats stats;
62626- atomic_long_t rx_dropped; /* dropped packets by core network
62627+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62628 * Do not use this in drivers.
62629 */
62630
62631diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62632new file mode 100644
62633index 0000000..33f4af8
62634--- /dev/null
62635+++ b/include/linux/netfilter/xt_gradm.h
62636@@ -0,0 +1,9 @@
62637+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62638+#define _LINUX_NETFILTER_XT_GRADM_H 1
62639+
62640+struct xt_gradm_mtinfo {
62641+ __u16 flags;
62642+ __u16 invflags;
62643+};
62644+
62645+#endif
62646diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62647index c65a18a..0c05f3a 100644
62648--- a/include/linux/of_pdt.h
62649+++ b/include/linux/of_pdt.h
62650@@ -32,7 +32,7 @@ struct of_pdt_ops {
62651
62652 /* return 0 on success; fill in 'len' with number of bytes in path */
62653 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62654-};
62655+} __no_const;
62656
62657 extern void *prom_early_alloc(unsigned long size);
62658
62659diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62660index a4c5624..79d6d88 100644
62661--- a/include/linux/oprofile.h
62662+++ b/include/linux/oprofile.h
62663@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62664 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62665 char const * name, ulong * val);
62666
62667-/** Create a file for read-only access to an atomic_t. */
62668+/** Create a file for read-only access to an atomic_unchecked_t. */
62669 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62670- char const * name, atomic_t * val);
62671+ char const * name, atomic_unchecked_t * val);
62672
62673 /** create a directory */
62674 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62675diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62676index ddbb6a9..be1680e 100644
62677--- a/include/linux/perf_event.h
62678+++ b/include/linux/perf_event.h
62679@@ -879,8 +879,8 @@ struct perf_event {
62680
62681 enum perf_event_active_state state;
62682 unsigned int attach_state;
62683- local64_t count;
62684- atomic64_t child_count;
62685+ local64_t count; /* PaX: fix it one day */
62686+ atomic64_unchecked_t child_count;
62687
62688 /*
62689 * These are the total time in nanoseconds that the event
62690@@ -931,8 +931,8 @@ struct perf_event {
62691 * These accumulate total time (in nanoseconds) that children
62692 * events have been enabled and running, respectively.
62693 */
62694- atomic64_t child_total_time_enabled;
62695- atomic64_t child_total_time_running;
62696+ atomic64_unchecked_t child_total_time_enabled;
62697+ atomic64_unchecked_t child_total_time_running;
62698
62699 /*
62700 * Protect attach/detach and child_list:
62701diff --git a/include/linux/personality.h b/include/linux/personality.h
62702index 8fc7dd1a..c19d89e 100644
62703--- a/include/linux/personality.h
62704+++ b/include/linux/personality.h
62705@@ -44,6 +44,7 @@ enum {
62706 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62707 ADDR_NO_RANDOMIZE | \
62708 ADDR_COMPAT_LAYOUT | \
62709+ ADDR_LIMIT_3GB | \
62710 MMAP_PAGE_ZERO)
62711
62712 /*
62713diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62714index e1ac1ce..0675fed 100644
62715--- a/include/linux/pipe_fs_i.h
62716+++ b/include/linux/pipe_fs_i.h
62717@@ -45,9 +45,9 @@ struct pipe_buffer {
62718 struct pipe_inode_info {
62719 wait_queue_head_t wait;
62720 unsigned int nrbufs, curbuf, buffers;
62721- unsigned int readers;
62722- unsigned int writers;
62723- unsigned int waiting_writers;
62724+ atomic_t readers;
62725+ atomic_t writers;
62726+ atomic_t waiting_writers;
62727 unsigned int r_counter;
62728 unsigned int w_counter;
62729 struct page *tmp_page;
62730diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62731index 609daae..5392427 100644
62732--- a/include/linux/pm_runtime.h
62733+++ b/include/linux/pm_runtime.h
62734@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62735
62736 static inline void pm_runtime_mark_last_busy(struct device *dev)
62737 {
62738- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62739+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62740 }
62741
62742 #else /* !CONFIG_PM_RUNTIME */
62743diff --git a/include/linux/poison.h b/include/linux/poison.h
62744index 2110a81..13a11bb 100644
62745--- a/include/linux/poison.h
62746+++ b/include/linux/poison.h
62747@@ -19,8 +19,8 @@
62748 * under normal circumstances, used to verify that nobody uses
62749 * non-initialized list entries.
62750 */
62751-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62752-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62753+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62754+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62755
62756 /********** include/linux/timer.h **********/
62757 /*
62758diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62759index 5a710b9..0b0dab9 100644
62760--- a/include/linux/preempt.h
62761+++ b/include/linux/preempt.h
62762@@ -126,7 +126,7 @@ struct preempt_ops {
62763 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62764 void (*sched_out)(struct preempt_notifier *notifier,
62765 struct task_struct *next);
62766-};
62767+} __no_const;
62768
62769 /**
62770 * preempt_notifier - key for installing preemption notifiers
62771diff --git a/include/linux/printk.h b/include/linux/printk.h
62772index 0525927..a5388b6 100644
62773--- a/include/linux/printk.h
62774+++ b/include/linux/printk.h
62775@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62776 extern int printk_needs_cpu(int cpu);
62777 extern void printk_tick(void);
62778
62779+extern int kptr_restrict;
62780+
62781 #ifdef CONFIG_PRINTK
62782 asmlinkage __printf(1, 0)
62783 int vprintk(const char *fmt, va_list args);
62784@@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62785
62786 extern int printk_delay_msec;
62787 extern int dmesg_restrict;
62788-extern int kptr_restrict;
62789
62790 void log_buf_kexec_setup(void);
62791 void __init setup_log_buf(int early);
62792diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62793index 85c5073..51fac8b 100644
62794--- a/include/linux/proc_fs.h
62795+++ b/include/linux/proc_fs.h
62796@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62797 return proc_create_data(name, mode, parent, proc_fops, NULL);
62798 }
62799
62800+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62801+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62802+{
62803+#ifdef CONFIG_GRKERNSEC_PROC_USER
62804+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62805+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62806+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62807+#else
62808+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62809+#endif
62810+}
62811+
62812 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62813 umode_t mode, struct proc_dir_entry *base,
62814 read_proc_t *read_proc, void * data)
62815@@ -258,7 +270,7 @@ union proc_op {
62816 int (*proc_show)(struct seq_file *m,
62817 struct pid_namespace *ns, struct pid *pid,
62818 struct task_struct *task);
62819-};
62820+} __no_const;
62821
62822 struct ctl_table_header;
62823 struct ctl_table;
62824diff --git a/include/linux/random.h b/include/linux/random.h
62825index 8f74538..de61694 100644
62826--- a/include/linux/random.h
62827+++ b/include/linux/random.h
62828@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
62829 unsigned int value);
62830 extern void add_interrupt_randomness(int irq);
62831
62832+#ifdef CONFIG_PAX_LATENT_ENTROPY
62833+extern void transfer_latent_entropy(void);
62834+#endif
62835+
62836 extern void get_random_bytes(void *buf, int nbytes);
62837 void generate_random_uuid(unsigned char uuid_out[16]);
62838
62839@@ -69,12 +73,17 @@ void srandom32(u32 seed);
62840
62841 u32 prandom32(struct rnd_state *);
62842
62843+static inline unsigned long pax_get_random_long(void)
62844+{
62845+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62846+}
62847+
62848 /*
62849 * Handle minimum values for seeds
62850 */
62851 static inline u32 __seed(u32 x, u32 m)
62852 {
62853- return (x < m) ? x + m : x;
62854+ return (x <= m) ? x + m + 1 : x;
62855 }
62856
62857 /**
62858diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62859index e0879a7..a12f962 100644
62860--- a/include/linux/reboot.h
62861+++ b/include/linux/reboot.h
62862@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62863 * Architecture-specific implementations of sys_reboot commands.
62864 */
62865
62866-extern void machine_restart(char *cmd);
62867-extern void machine_halt(void);
62868-extern void machine_power_off(void);
62869+extern void machine_restart(char *cmd) __noreturn;
62870+extern void machine_halt(void) __noreturn;
62871+extern void machine_power_off(void) __noreturn;
62872
62873 extern void machine_shutdown(void);
62874 struct pt_regs;
62875@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62876 */
62877
62878 extern void kernel_restart_prepare(char *cmd);
62879-extern void kernel_restart(char *cmd);
62880-extern void kernel_halt(void);
62881-extern void kernel_power_off(void);
62882+extern void kernel_restart(char *cmd) __noreturn;
62883+extern void kernel_halt(void) __noreturn;
62884+extern void kernel_power_off(void) __noreturn;
62885
62886 extern int C_A_D; /* for sysctl */
62887 void ctrl_alt_del(void);
62888@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62889 * Emergency restart, callable from an interrupt handler.
62890 */
62891
62892-extern void emergency_restart(void);
62893+extern void emergency_restart(void) __noreturn;
62894 #include <asm/emergency-restart.h>
62895
62896 #endif
62897diff --git a/include/linux/relay.h b/include/linux/relay.h
62898index 91cacc3..b55ff74 100644
62899--- a/include/linux/relay.h
62900+++ b/include/linux/relay.h
62901@@ -160,7 +160,7 @@ struct rchan_callbacks
62902 * The callback should return 0 if successful, negative if not.
62903 */
62904 int (*remove_buf_file)(struct dentry *dentry);
62905-};
62906+} __no_const;
62907
62908 /*
62909 * CONFIG_RELAY kernel API, kernel/relay.c
62910diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62911index 6fdf027..ff72610 100644
62912--- a/include/linux/rfkill.h
62913+++ b/include/linux/rfkill.h
62914@@ -147,6 +147,7 @@ struct rfkill_ops {
62915 void (*query)(struct rfkill *rfkill, void *data);
62916 int (*set_block)(void *data, bool blocked);
62917 };
62918+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62919
62920 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62921 /**
62922diff --git a/include/linux/rio.h b/include/linux/rio.h
62923index 4d50611..c6858a2 100644
62924--- a/include/linux/rio.h
62925+++ b/include/linux/rio.h
62926@@ -315,7 +315,7 @@ struct rio_ops {
62927 int mbox, void *buffer, size_t len);
62928 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62929 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62930-};
62931+} __no_const;
62932
62933 #define RIO_RESOURCE_MEM 0x00000100
62934 #define RIO_RESOURCE_DOORBELL 0x00000200
62935diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62936index fd07c45..4676b8e 100644
62937--- a/include/linux/rmap.h
62938+++ b/include/linux/rmap.h
62939@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62940 void anon_vma_init(void); /* create anon_vma_cachep */
62941 int anon_vma_prepare(struct vm_area_struct *);
62942 void unlink_anon_vmas(struct vm_area_struct *);
62943-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62944+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62945 void anon_vma_moveto_tail(struct vm_area_struct *);
62946-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62947+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62948
62949 static inline void anon_vma_merge(struct vm_area_struct *vma,
62950 struct vm_area_struct *next)
62951diff --git a/include/linux/sched.h b/include/linux/sched.h
62952index 7b06169..c92adbe 100644
62953--- a/include/linux/sched.h
62954+++ b/include/linux/sched.h
62955@@ -100,6 +100,7 @@ struct bio_list;
62956 struct fs_struct;
62957 struct perf_event_context;
62958 struct blk_plug;
62959+struct linux_binprm;
62960
62961 /*
62962 * List of flags we want to share for kernel threads,
62963@@ -382,10 +383,13 @@ struct user_namespace;
62964 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62965
62966 extern int sysctl_max_map_count;
62967+extern unsigned long sysctl_heap_stack_gap;
62968
62969 #include <linux/aio.h>
62970
62971 #ifdef CONFIG_MMU
62972+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62973+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62974 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62975 extern unsigned long
62976 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62977@@ -643,6 +647,17 @@ struct signal_struct {
62978 #ifdef CONFIG_TASKSTATS
62979 struct taskstats *stats;
62980 #endif
62981+
62982+#ifdef CONFIG_GRKERNSEC
62983+ u32 curr_ip;
62984+ u32 saved_ip;
62985+ u32 gr_saddr;
62986+ u32 gr_daddr;
62987+ u16 gr_sport;
62988+ u16 gr_dport;
62989+ u8 used_accept:1;
62990+#endif
62991+
62992 #ifdef CONFIG_AUDIT
62993 unsigned audit_tty;
62994 struct tty_audit_buf *tty_audit_buf;
62995@@ -726,6 +741,11 @@ struct user_struct {
62996 struct key *session_keyring; /* UID's default session keyring */
62997 #endif
62998
62999+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63000+ unsigned int banned;
63001+ unsigned long ban_expires;
63002+#endif
63003+
63004 /* Hash table maintenance information */
63005 struct hlist_node uidhash_node;
63006 uid_t uid;
63007@@ -1386,8 +1406,8 @@ struct task_struct {
63008 struct list_head thread_group;
63009
63010 struct completion *vfork_done; /* for vfork() */
63011- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63012- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63013+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63014+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63015
63016 cputime_t utime, stime, utimescaled, stimescaled;
63017 cputime_t gtime;
63018@@ -1403,13 +1423,6 @@ struct task_struct {
63019 struct task_cputime cputime_expires;
63020 struct list_head cpu_timers[3];
63021
63022-/* process credentials */
63023- const struct cred __rcu *real_cred; /* objective and real subjective task
63024- * credentials (COW) */
63025- const struct cred __rcu *cred; /* effective (overridable) subjective task
63026- * credentials (COW) */
63027- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63028-
63029 char comm[TASK_COMM_LEN]; /* executable name excluding path
63030 - access with [gs]et_task_comm (which lock
63031 it with task_lock())
63032@@ -1426,8 +1439,16 @@ struct task_struct {
63033 #endif
63034 /* CPU-specific state of this task */
63035 struct thread_struct thread;
63036+/* thread_info moved to task_struct */
63037+#ifdef CONFIG_X86
63038+ struct thread_info tinfo;
63039+#endif
63040 /* filesystem information */
63041 struct fs_struct *fs;
63042+
63043+ const struct cred __rcu *cred; /* effective (overridable) subjective task
63044+ * credentials (COW) */
63045+
63046 /* open file information */
63047 struct files_struct *files;
63048 /* namespaces */
63049@@ -1469,6 +1490,11 @@ struct task_struct {
63050 struct rt_mutex_waiter *pi_blocked_on;
63051 #endif
63052
63053+/* process credentials */
63054+ const struct cred __rcu *real_cred; /* objective and real subjective task
63055+ * credentials (COW) */
63056+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63057+
63058 #ifdef CONFIG_DEBUG_MUTEXES
63059 /* mutex deadlock detection */
63060 struct mutex_waiter *blocked_on;
63061@@ -1585,6 +1611,27 @@ struct task_struct {
63062 unsigned long default_timer_slack_ns;
63063
63064 struct list_head *scm_work_list;
63065+
63066+#ifdef CONFIG_GRKERNSEC
63067+ /* grsecurity */
63068+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63069+ u64 exec_id;
63070+#endif
63071+#ifdef CONFIG_GRKERNSEC_SETXID
63072+ const struct cred *delayed_cred;
63073+#endif
63074+ struct dentry *gr_chroot_dentry;
63075+ struct acl_subject_label *acl;
63076+ struct acl_role_label *role;
63077+ struct file *exec_file;
63078+ u16 acl_role_id;
63079+ /* is this the task that authenticated to the special role */
63080+ u8 acl_sp_role;
63081+ u8 is_writable;
63082+ u8 brute;
63083+ u8 gr_is_chrooted;
63084+#endif
63085+
63086 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63087 /* Index of current stored address in ret_stack */
63088 int curr_ret_stack;
63089@@ -1619,6 +1666,51 @@ struct task_struct {
63090 #endif
63091 };
63092
63093+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63094+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63095+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63096+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63097+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63098+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63099+
63100+#ifdef CONFIG_PAX_SOFTMODE
63101+extern int pax_softmode;
63102+#endif
63103+
63104+extern int pax_check_flags(unsigned long *);
63105+
63106+/* if tsk != current then task_lock must be held on it */
63107+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63108+static inline unsigned long pax_get_flags(struct task_struct *tsk)
63109+{
63110+ if (likely(tsk->mm))
63111+ return tsk->mm->pax_flags;
63112+ else
63113+ return 0UL;
63114+}
63115+
63116+/* if tsk != current then task_lock must be held on it */
63117+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63118+{
63119+ if (likely(tsk->mm)) {
63120+ tsk->mm->pax_flags = flags;
63121+ return 0;
63122+ }
63123+ return -EINVAL;
63124+}
63125+#endif
63126+
63127+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63128+extern void pax_set_initial_flags(struct linux_binprm *bprm);
63129+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63130+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63131+#endif
63132+
63133+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63134+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63135+extern void pax_report_refcount_overflow(struct pt_regs *regs);
63136+extern void check_object_size(const void *ptr, unsigned long n, bool to);
63137+
63138 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63139 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63140
63141@@ -2146,7 +2238,9 @@ void yield(void);
63142 extern struct exec_domain default_exec_domain;
63143
63144 union thread_union {
63145+#ifndef CONFIG_X86
63146 struct thread_info thread_info;
63147+#endif
63148 unsigned long stack[THREAD_SIZE/sizeof(long)];
63149 };
63150
63151@@ -2179,6 +2273,7 @@ extern struct pid_namespace init_pid_ns;
63152 */
63153
63154 extern struct task_struct *find_task_by_vpid(pid_t nr);
63155+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63156 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63157 struct pid_namespace *ns);
63158
63159@@ -2322,7 +2417,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63160 extern void exit_itimers(struct signal_struct *);
63161 extern void flush_itimer_signals(void);
63162
63163-extern void do_group_exit(int);
63164+extern __noreturn void do_group_exit(int);
63165
63166 extern void daemonize(const char *, ...);
63167 extern int allow_signal(int);
63168@@ -2523,9 +2618,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63169
63170 #endif
63171
63172-static inline int object_is_on_stack(void *obj)
63173+static inline int object_starts_on_stack(void *obj)
63174 {
63175- void *stack = task_stack_page(current);
63176+ const void *stack = task_stack_page(current);
63177
63178 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63179 }
63180diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63181index 899fbb4..1cb4138 100644
63182--- a/include/linux/screen_info.h
63183+++ b/include/linux/screen_info.h
63184@@ -43,7 +43,8 @@ struct screen_info {
63185 __u16 pages; /* 0x32 */
63186 __u16 vesa_attributes; /* 0x34 */
63187 __u32 capabilities; /* 0x36 */
63188- __u8 _reserved[6]; /* 0x3a */
63189+ __u16 vesapm_size; /* 0x3a */
63190+ __u8 _reserved[4]; /* 0x3c */
63191 } __attribute__((packed));
63192
63193 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63194diff --git a/include/linux/security.h b/include/linux/security.h
63195index 673afbb..2b7454b 100644
63196--- a/include/linux/security.h
63197+++ b/include/linux/security.h
63198@@ -26,6 +26,7 @@
63199 #include <linux/capability.h>
63200 #include <linux/slab.h>
63201 #include <linux/err.h>
63202+#include <linux/grsecurity.h>
63203
63204 struct linux_binprm;
63205 struct cred;
63206diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63207index fc61854..d7c490b 100644
63208--- a/include/linux/seq_file.h
63209+++ b/include/linux/seq_file.h
63210@@ -25,6 +25,9 @@ struct seq_file {
63211 struct mutex lock;
63212 const struct seq_operations *op;
63213 int poll_event;
63214+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63215+ u64 exec_id;
63216+#endif
63217 void *private;
63218 };
63219
63220@@ -34,6 +37,7 @@ struct seq_operations {
63221 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63222 int (*show) (struct seq_file *m, void *v);
63223 };
63224+typedef struct seq_operations __no_const seq_operations_no_const;
63225
63226 #define SEQ_SKIP 1
63227
63228diff --git a/include/linux/shm.h b/include/linux/shm.h
63229index 92808b8..c28cac4 100644
63230--- a/include/linux/shm.h
63231+++ b/include/linux/shm.h
63232@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63233
63234 /* The task created the shm object. NULL if the task is dead. */
63235 struct task_struct *shm_creator;
63236+#ifdef CONFIG_GRKERNSEC
63237+ time_t shm_createtime;
63238+ pid_t shm_lapid;
63239+#endif
63240 };
63241
63242 /* shm_mode upper byte flags */
63243diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63244index c1bae8d..2dbcd31 100644
63245--- a/include/linux/skbuff.h
63246+++ b/include/linux/skbuff.h
63247@@ -663,7 +663,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63248 */
63249 static inline int skb_queue_empty(const struct sk_buff_head *list)
63250 {
63251- return list->next == (struct sk_buff *)list;
63252+ return list->next == (const struct sk_buff *)list;
63253 }
63254
63255 /**
63256@@ -676,7 +676,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63257 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63258 const struct sk_buff *skb)
63259 {
63260- return skb->next == (struct sk_buff *)list;
63261+ return skb->next == (const struct sk_buff *)list;
63262 }
63263
63264 /**
63265@@ -689,7 +689,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63266 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63267 const struct sk_buff *skb)
63268 {
63269- return skb->prev == (struct sk_buff *)list;
63270+ return skb->prev == (const struct sk_buff *)list;
63271 }
63272
63273 /**
63274@@ -1584,7 +1584,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63275 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63276 */
63277 #ifndef NET_SKB_PAD
63278-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63279+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63280 #endif
63281
63282 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63283diff --git a/include/linux/slab.h b/include/linux/slab.h
63284index a595dce..dfab0d2 100644
63285--- a/include/linux/slab.h
63286+++ b/include/linux/slab.h
63287@@ -11,12 +11,20 @@
63288
63289 #include <linux/gfp.h>
63290 #include <linux/types.h>
63291+#include <linux/err.h>
63292
63293 /*
63294 * Flags to pass to kmem_cache_create().
63295 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63296 */
63297 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63298+
63299+#ifdef CONFIG_PAX_USERCOPY_SLABS
63300+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63301+#else
63302+#define SLAB_USERCOPY 0x00000000UL
63303+#endif
63304+
63305 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63306 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63307 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63308@@ -87,10 +95,13 @@
63309 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63310 * Both make kfree a no-op.
63311 */
63312-#define ZERO_SIZE_PTR ((void *)16)
63313+#define ZERO_SIZE_PTR \
63314+({ \
63315+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63316+ (void *)(-MAX_ERRNO-1L); \
63317+})
63318
63319-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63320- (unsigned long)ZERO_SIZE_PTR)
63321+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63322
63323 /*
63324 * struct kmem_cache related prototypes
63325@@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63326 void kfree(const void *);
63327 void kzfree(const void *);
63328 size_t ksize(const void *);
63329+const char *check_heap_object(const void *ptr, unsigned long n, bool to);
63330+bool is_usercopy_object(const void *ptr);
63331
63332 /*
63333 * Allocator specific definitions. These are mainly used to establish optimized
63334@@ -240,6 +253,7 @@ size_t ksize(const void *);
63335 * for general use, and so are not documented here. For a full list of
63336 * potential flags, always refer to linux/gfp.h.
63337 */
63338+static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
63339 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
63340 {
63341 if (size != 0 && n > ULONG_MAX / size)
63342@@ -298,7 +312,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63343 */
63344 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63345 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63346-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63347+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63348 #define kmalloc_track_caller(size, flags) \
63349 __kmalloc_track_caller(size, flags, _RET_IP_)
63350 #else
63351@@ -317,7 +331,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63352 */
63353 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63354 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63355-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63356+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63357 #define kmalloc_node_track_caller(size, flags, node) \
63358 __kmalloc_node_track_caller(size, flags, node, \
63359 _RET_IP_)
63360diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63361index fbd1117..0a3d314 100644
63362--- a/include/linux/slab_def.h
63363+++ b/include/linux/slab_def.h
63364@@ -66,10 +66,10 @@ struct kmem_cache {
63365 unsigned long node_allocs;
63366 unsigned long node_frees;
63367 unsigned long node_overflow;
63368- atomic_t allochit;
63369- atomic_t allocmiss;
63370- atomic_t freehit;
63371- atomic_t freemiss;
63372+ atomic_unchecked_t allochit;
63373+ atomic_unchecked_t allocmiss;
63374+ atomic_unchecked_t freehit;
63375+ atomic_unchecked_t freemiss;
63376
63377 /*
63378 * If debugging is enabled, then the allocator can add additional
63379@@ -103,11 +103,16 @@ struct cache_sizes {
63380 #ifdef CONFIG_ZONE_DMA
63381 struct kmem_cache *cs_dmacachep;
63382 #endif
63383+
63384+#ifdef CONFIG_PAX_USERCOPY_SLABS
63385+ struct kmem_cache *cs_usercopycachep;
63386+#endif
63387+
63388 };
63389 extern struct cache_sizes malloc_sizes[];
63390
63391 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63392-void *__kmalloc(size_t size, gfp_t flags);
63393+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63394
63395 #ifdef CONFIG_TRACING
63396 extern void *kmem_cache_alloc_trace(size_t size,
63397@@ -150,6 +155,13 @@ found:
63398 cachep = malloc_sizes[i].cs_dmacachep;
63399 else
63400 #endif
63401+
63402+#ifdef CONFIG_PAX_USERCOPY_SLABS
63403+ if (flags & GFP_USERCOPY)
63404+ cachep = malloc_sizes[i].cs_usercopycachep;
63405+ else
63406+#endif
63407+
63408 cachep = malloc_sizes[i].cs_cachep;
63409
63410 ret = kmem_cache_alloc_trace(size, cachep, flags);
63411@@ -160,7 +172,7 @@ found:
63412 }
63413
63414 #ifdef CONFIG_NUMA
63415-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63416+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63417 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63418
63419 #ifdef CONFIG_TRACING
63420@@ -203,6 +215,13 @@ found:
63421 cachep = malloc_sizes[i].cs_dmacachep;
63422 else
63423 #endif
63424+
63425+#ifdef CONFIG_PAX_USERCOPY_SLABS
63426+ if (flags & GFP_USERCOPY)
63427+ cachep = malloc_sizes[i].cs_usercopycachep;
63428+ else
63429+#endif
63430+
63431 cachep = malloc_sizes[i].cs_cachep;
63432
63433 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
63434diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63435index 0ec00b3..39cb7fc 100644
63436--- a/include/linux/slob_def.h
63437+++ b/include/linux/slob_def.h
63438@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63439 return kmem_cache_alloc_node(cachep, flags, -1);
63440 }
63441
63442-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63443+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63444
63445 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63446 {
63447@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63448 return __kmalloc_node(size, flags, -1);
63449 }
63450
63451+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63452 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63453 {
63454 return kmalloc(size, flags);
63455diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63456index c2f8c8b..be9e036 100644
63457--- a/include/linux/slub_def.h
63458+++ b/include/linux/slub_def.h
63459@@ -92,7 +92,7 @@ struct kmem_cache {
63460 struct kmem_cache_order_objects max;
63461 struct kmem_cache_order_objects min;
63462 gfp_t allocflags; /* gfp flags to use on each alloc */
63463- int refcount; /* Refcount for slab cache destroy */
63464+ atomic_t refcount; /* Refcount for slab cache destroy */
63465 void (*ctor)(void *);
63466 int inuse; /* Offset to metadata */
63467 int align; /* Alignment */
63468@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
63469 * Sorry that the following has to be that ugly but some versions of GCC
63470 * have trouble with constant propagation and loops.
63471 */
63472+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
63473 static __always_inline int kmalloc_index(size_t size)
63474 {
63475 if (!size)
63476@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63477 }
63478
63479 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63480-void *__kmalloc(size_t size, gfp_t flags);
63481+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63482
63483 static __always_inline void *
63484 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63485@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63486 }
63487 #endif
63488
63489+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63490 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63491 {
63492 unsigned int order = get_order(size);
63493@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63494 }
63495
63496 #ifdef CONFIG_NUMA
63497-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63498+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63499 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63500
63501 #ifdef CONFIG_TRACING
63502diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63503index de8832d..0147b46 100644
63504--- a/include/linux/sonet.h
63505+++ b/include/linux/sonet.h
63506@@ -61,7 +61,7 @@ struct sonet_stats {
63507 #include <linux/atomic.h>
63508
63509 struct k_sonet_stats {
63510-#define __HANDLE_ITEM(i) atomic_t i
63511+#define __HANDLE_ITEM(i) atomic_unchecked_t i
63512 __SONET_ITEMS
63513 #undef __HANDLE_ITEM
63514 };
63515diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63516index 523547e..2cb7140 100644
63517--- a/include/linux/sunrpc/clnt.h
63518+++ b/include/linux/sunrpc/clnt.h
63519@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63520 {
63521 switch (sap->sa_family) {
63522 case AF_INET:
63523- return ntohs(((struct sockaddr_in *)sap)->sin_port);
63524+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63525 case AF_INET6:
63526- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63527+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63528 }
63529 return 0;
63530 }
63531@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63532 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63533 const struct sockaddr *src)
63534 {
63535- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63536+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63537 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63538
63539 dsin->sin_family = ssin->sin_family;
63540@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63541 if (sa->sa_family != AF_INET6)
63542 return 0;
63543
63544- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63545+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63546 }
63547
63548 #endif /* __KERNEL__ */
63549diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63550index dc0c3cc..8503fb6 100644
63551--- a/include/linux/sunrpc/sched.h
63552+++ b/include/linux/sunrpc/sched.h
63553@@ -106,6 +106,7 @@ struct rpc_call_ops {
63554 void (*rpc_count_stats)(struct rpc_task *, void *);
63555 void (*rpc_release)(void *);
63556 };
63557+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63558
63559 struct rpc_task_setup {
63560 struct rpc_task *task;
63561diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63562index 0b8e3e6..33e0a01 100644
63563--- a/include/linux/sunrpc/svc_rdma.h
63564+++ b/include/linux/sunrpc/svc_rdma.h
63565@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63566 extern unsigned int svcrdma_max_requests;
63567 extern unsigned int svcrdma_max_req_size;
63568
63569-extern atomic_t rdma_stat_recv;
63570-extern atomic_t rdma_stat_read;
63571-extern atomic_t rdma_stat_write;
63572-extern atomic_t rdma_stat_sq_starve;
63573-extern atomic_t rdma_stat_rq_starve;
63574-extern atomic_t rdma_stat_rq_poll;
63575-extern atomic_t rdma_stat_rq_prod;
63576-extern atomic_t rdma_stat_sq_poll;
63577-extern atomic_t rdma_stat_sq_prod;
63578+extern atomic_unchecked_t rdma_stat_recv;
63579+extern atomic_unchecked_t rdma_stat_read;
63580+extern atomic_unchecked_t rdma_stat_write;
63581+extern atomic_unchecked_t rdma_stat_sq_starve;
63582+extern atomic_unchecked_t rdma_stat_rq_starve;
63583+extern atomic_unchecked_t rdma_stat_rq_poll;
63584+extern atomic_unchecked_t rdma_stat_rq_prod;
63585+extern atomic_unchecked_t rdma_stat_sq_poll;
63586+extern atomic_unchecked_t rdma_stat_sq_prod;
63587
63588 #define RPCRDMA_VERSION 1
63589
63590diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63591index c34b4c8..a65b67d 100644
63592--- a/include/linux/sysctl.h
63593+++ b/include/linux/sysctl.h
63594@@ -155,7 +155,11 @@ enum
63595 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63596 };
63597
63598-
63599+#ifdef CONFIG_PAX_SOFTMODE
63600+enum {
63601+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63602+};
63603+#endif
63604
63605 /* CTL_VM names: */
63606 enum
63607@@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63608
63609 extern int proc_dostring(struct ctl_table *, int,
63610 void __user *, size_t *, loff_t *);
63611+extern int proc_dostring_modpriv(struct ctl_table *, int,
63612+ void __user *, size_t *, loff_t *);
63613 extern int proc_dointvec(struct ctl_table *, int,
63614 void __user *, size_t *, loff_t *);
63615 extern int proc_dointvec_minmax(struct ctl_table *, int,
63616diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63617index ff7dc08..893e1bd 100644
63618--- a/include/linux/tty_ldisc.h
63619+++ b/include/linux/tty_ldisc.h
63620@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63621
63622 struct module *owner;
63623
63624- int refcount;
63625+ atomic_t refcount;
63626 };
63627
63628 struct tty_ldisc {
63629diff --git a/include/linux/types.h b/include/linux/types.h
63630index 7f480db..175c256 100644
63631--- a/include/linux/types.h
63632+++ b/include/linux/types.h
63633@@ -220,10 +220,26 @@ typedef struct {
63634 int counter;
63635 } atomic_t;
63636
63637+#ifdef CONFIG_PAX_REFCOUNT
63638+typedef struct {
63639+ int counter;
63640+} atomic_unchecked_t;
63641+#else
63642+typedef atomic_t atomic_unchecked_t;
63643+#endif
63644+
63645 #ifdef CONFIG_64BIT
63646 typedef struct {
63647 long counter;
63648 } atomic64_t;
63649+
63650+#ifdef CONFIG_PAX_REFCOUNT
63651+typedef struct {
63652+ long counter;
63653+} atomic64_unchecked_t;
63654+#else
63655+typedef atomic64_t atomic64_unchecked_t;
63656+#endif
63657 #endif
63658
63659 struct list_head {
63660diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63661index 5ca0951..ab496a5 100644
63662--- a/include/linux/uaccess.h
63663+++ b/include/linux/uaccess.h
63664@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63665 long ret; \
63666 mm_segment_t old_fs = get_fs(); \
63667 \
63668- set_fs(KERNEL_DS); \
63669 pagefault_disable(); \
63670- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63671- pagefault_enable(); \
63672+ set_fs(KERNEL_DS); \
63673+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63674 set_fs(old_fs); \
63675+ pagefault_enable(); \
63676 ret; \
63677 })
63678
63679diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63680index 99c1b4d..bb94261 100644
63681--- a/include/linux/unaligned/access_ok.h
63682+++ b/include/linux/unaligned/access_ok.h
63683@@ -6,32 +6,32 @@
63684
63685 static inline u16 get_unaligned_le16(const void *p)
63686 {
63687- return le16_to_cpup((__le16 *)p);
63688+ return le16_to_cpup((const __le16 *)p);
63689 }
63690
63691 static inline u32 get_unaligned_le32(const void *p)
63692 {
63693- return le32_to_cpup((__le32 *)p);
63694+ return le32_to_cpup((const __le32 *)p);
63695 }
63696
63697 static inline u64 get_unaligned_le64(const void *p)
63698 {
63699- return le64_to_cpup((__le64 *)p);
63700+ return le64_to_cpup((const __le64 *)p);
63701 }
63702
63703 static inline u16 get_unaligned_be16(const void *p)
63704 {
63705- return be16_to_cpup((__be16 *)p);
63706+ return be16_to_cpup((const __be16 *)p);
63707 }
63708
63709 static inline u32 get_unaligned_be32(const void *p)
63710 {
63711- return be32_to_cpup((__be32 *)p);
63712+ return be32_to_cpup((const __be32 *)p);
63713 }
63714
63715 static inline u64 get_unaligned_be64(const void *p)
63716 {
63717- return be64_to_cpup((__be64 *)p);
63718+ return be64_to_cpup((const __be64 *)p);
63719 }
63720
63721 static inline void put_unaligned_le16(u16 val, void *p)
63722diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63723index 547e59c..db6ad19 100644
63724--- a/include/linux/usb/renesas_usbhs.h
63725+++ b/include/linux/usb/renesas_usbhs.h
63726@@ -39,7 +39,7 @@ enum {
63727 */
63728 struct renesas_usbhs_driver_callback {
63729 int (*notify_hotplug)(struct platform_device *pdev);
63730-};
63731+} __no_const;
63732
63733 /*
63734 * callback functions for platform
63735@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63736 * VBUS control is needed for Host
63737 */
63738 int (*set_vbus)(struct platform_device *pdev, int enable);
63739-};
63740+} __no_const;
63741
63742 /*
63743 * parameters for renesas usbhs
63744diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63745index 6f8fbcf..8259001 100644
63746--- a/include/linux/vermagic.h
63747+++ b/include/linux/vermagic.h
63748@@ -25,9 +25,35 @@
63749 #define MODULE_ARCH_VERMAGIC ""
63750 #endif
63751
63752+#ifdef CONFIG_PAX_REFCOUNT
63753+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63754+#else
63755+#define MODULE_PAX_REFCOUNT ""
63756+#endif
63757+
63758+#ifdef CONSTIFY_PLUGIN
63759+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63760+#else
63761+#define MODULE_CONSTIFY_PLUGIN ""
63762+#endif
63763+
63764+#ifdef STACKLEAK_PLUGIN
63765+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63766+#else
63767+#define MODULE_STACKLEAK_PLUGIN ""
63768+#endif
63769+
63770+#ifdef CONFIG_GRKERNSEC
63771+#define MODULE_GRSEC "GRSEC "
63772+#else
63773+#define MODULE_GRSEC ""
63774+#endif
63775+
63776 #define VERMAGIC_STRING \
63777 UTS_RELEASE " " \
63778 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63779 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63780- MODULE_ARCH_VERMAGIC
63781+ MODULE_ARCH_VERMAGIC \
63782+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63783+ MODULE_GRSEC
63784
63785diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63786index dcdfc2b..ec79ab5 100644
63787--- a/include/linux/vmalloc.h
63788+++ b/include/linux/vmalloc.h
63789@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63790 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63791 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63792 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63793+
63794+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63795+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63796+#endif
63797+
63798 /* bits [20..32] reserved for arch specific ioremap internals */
63799
63800 /*
63801@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63802 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63803 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63804 unsigned long start, unsigned long end, gfp_t gfp_mask,
63805- pgprot_t prot, int node, void *caller);
63806+ pgprot_t prot, int node, void *caller) __size_overflow(1);
63807 extern void vfree(const void *addr);
63808
63809 extern void *vmap(struct page **pages, unsigned int count,
63810@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63811 extern void free_vm_area(struct vm_struct *area);
63812
63813 /* for /dev/kmem */
63814-extern long vread(char *buf, char *addr, unsigned long count);
63815-extern long vwrite(char *buf, char *addr, unsigned long count);
63816+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63817+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63818
63819 /*
63820 * Internals. Dont't use..
63821diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63822index 65efb92..137adbb 100644
63823--- a/include/linux/vmstat.h
63824+++ b/include/linux/vmstat.h
63825@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63826 /*
63827 * Zone based page accounting with per cpu differentials.
63828 */
63829-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63830+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63831
63832 static inline void zone_page_state_add(long x, struct zone *zone,
63833 enum zone_stat_item item)
63834 {
63835- atomic_long_add(x, &zone->vm_stat[item]);
63836- atomic_long_add(x, &vm_stat[item]);
63837+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63838+ atomic_long_add_unchecked(x, &vm_stat[item]);
63839 }
63840
63841 static inline unsigned long global_page_state(enum zone_stat_item item)
63842 {
63843- long x = atomic_long_read(&vm_stat[item]);
63844+ long x = atomic_long_read_unchecked(&vm_stat[item]);
63845 #ifdef CONFIG_SMP
63846 if (x < 0)
63847 x = 0;
63848@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63849 static inline unsigned long zone_page_state(struct zone *zone,
63850 enum zone_stat_item item)
63851 {
63852- long x = atomic_long_read(&zone->vm_stat[item]);
63853+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63854 #ifdef CONFIG_SMP
63855 if (x < 0)
63856 x = 0;
63857@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63858 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63859 enum zone_stat_item item)
63860 {
63861- long x = atomic_long_read(&zone->vm_stat[item]);
63862+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63863
63864 #ifdef CONFIG_SMP
63865 int cpu;
63866@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63867
63868 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63869 {
63870- atomic_long_inc(&zone->vm_stat[item]);
63871- atomic_long_inc(&vm_stat[item]);
63872+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
63873+ atomic_long_inc_unchecked(&vm_stat[item]);
63874 }
63875
63876 static inline void __inc_zone_page_state(struct page *page,
63877@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63878
63879 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63880 {
63881- atomic_long_dec(&zone->vm_stat[item]);
63882- atomic_long_dec(&vm_stat[item]);
63883+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
63884+ atomic_long_dec_unchecked(&vm_stat[item]);
63885 }
63886
63887 static inline void __dec_zone_page_state(struct page *page,
63888diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63889index e5d1220..ef6e406 100644
63890--- a/include/linux/xattr.h
63891+++ b/include/linux/xattr.h
63892@@ -57,6 +57,11 @@
63893 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63894 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63895
63896+/* User namespace */
63897+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63898+#define XATTR_PAX_FLAGS_SUFFIX "flags"
63899+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63900+
63901 #ifdef __KERNEL__
63902
63903 #include <linux/types.h>
63904diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63905index 4aeff96..b378cdc 100644
63906--- a/include/media/saa7146_vv.h
63907+++ b/include/media/saa7146_vv.h
63908@@ -163,7 +163,7 @@ struct saa7146_ext_vv
63909 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63910
63911 /* the extension can override this */
63912- struct v4l2_ioctl_ops ops;
63913+ v4l2_ioctl_ops_no_const ops;
63914 /* pointer to the saa7146 core ops */
63915 const struct v4l2_ioctl_ops *core_ops;
63916
63917diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63918index 96d2221..2292f89 100644
63919--- a/include/media/v4l2-dev.h
63920+++ b/include/media/v4l2-dev.h
63921@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63922
63923
63924 struct v4l2_file_operations {
63925- struct module *owner;
63926+ struct module * const owner;
63927 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63928 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63929 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63930@@ -71,6 +71,7 @@ struct v4l2_file_operations {
63931 int (*open) (struct file *);
63932 int (*release) (struct file *);
63933 };
63934+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63935
63936 /*
63937 * Newer version of video_device, handled by videodev2.c
63938diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63939index 3cb939c..f23c6bb 100644
63940--- a/include/media/v4l2-ioctl.h
63941+++ b/include/media/v4l2-ioctl.h
63942@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63943 long (*vidioc_default) (struct file *file, void *fh,
63944 bool valid_prio, int cmd, void *arg);
63945 };
63946-
63947+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63948
63949 /* v4l debugging and diagnostics */
63950
63951diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63952index 6db8ecf..8c23861 100644
63953--- a/include/net/caif/caif_hsi.h
63954+++ b/include/net/caif/caif_hsi.h
63955@@ -98,7 +98,7 @@ struct cfhsi_drv {
63956 void (*rx_done_cb) (struct cfhsi_drv *drv);
63957 void (*wake_up_cb) (struct cfhsi_drv *drv);
63958 void (*wake_down_cb) (struct cfhsi_drv *drv);
63959-};
63960+} __no_const;
63961
63962 /* Structure implemented by HSI device. */
63963 struct cfhsi_dev {
63964diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63965index 9e5425b..8136ffc 100644
63966--- a/include/net/caif/cfctrl.h
63967+++ b/include/net/caif/cfctrl.h
63968@@ -52,7 +52,7 @@ struct cfctrl_rsp {
63969 void (*radioset_rsp)(void);
63970 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63971 struct cflayer *client_layer);
63972-};
63973+} __no_const;
63974
63975 /* Link Setup Parameters for CAIF-Links. */
63976 struct cfctrl_link_param {
63977@@ -101,8 +101,8 @@ struct cfctrl_request_info {
63978 struct cfctrl {
63979 struct cfsrvl serv;
63980 struct cfctrl_rsp res;
63981- atomic_t req_seq_no;
63982- atomic_t rsp_seq_no;
63983+ atomic_unchecked_t req_seq_no;
63984+ atomic_unchecked_t rsp_seq_no;
63985 struct list_head list;
63986 /* Protects from simultaneous access to first_req list */
63987 spinlock_t info_list_lock;
63988diff --git a/include/net/flow.h b/include/net/flow.h
63989index 6c469db..7743b8e 100644
63990--- a/include/net/flow.h
63991+++ b/include/net/flow.h
63992@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63993
63994 extern void flow_cache_flush(void);
63995 extern void flow_cache_flush_deferred(void);
63996-extern atomic_t flow_cache_genid;
63997+extern atomic_unchecked_t flow_cache_genid;
63998
63999 #endif
64000diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64001index 2040bff..f4c0733 100644
64002--- a/include/net/inetpeer.h
64003+++ b/include/net/inetpeer.h
64004@@ -51,8 +51,8 @@ struct inet_peer {
64005 */
64006 union {
64007 struct {
64008- atomic_t rid; /* Frag reception counter */
64009- atomic_t ip_id_count; /* IP ID for the next packet */
64010+ atomic_unchecked_t rid; /* Frag reception counter */
64011+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64012 __u32 tcp_ts;
64013 __u32 tcp_ts_stamp;
64014 };
64015@@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64016 more++;
64017 inet_peer_refcheck(p);
64018 do {
64019- old = atomic_read(&p->ip_id_count);
64020+ old = atomic_read_unchecked(&p->ip_id_count);
64021 new = old + more;
64022 if (!new)
64023 new = 1;
64024- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64025+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64026 return new;
64027 }
64028
64029diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64030index 10422ef..662570f 100644
64031--- a/include/net/ip_fib.h
64032+++ b/include/net/ip_fib.h
64033@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64034
64035 #define FIB_RES_SADDR(net, res) \
64036 ((FIB_RES_NH(res).nh_saddr_genid == \
64037- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64038+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64039 FIB_RES_NH(res).nh_saddr : \
64040 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64041 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64042diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64043index 72522f0..2965e05 100644
64044--- a/include/net/ip_vs.h
64045+++ b/include/net/ip_vs.h
64046@@ -510,7 +510,7 @@ struct ip_vs_conn {
64047 struct ip_vs_conn *control; /* Master control connection */
64048 atomic_t n_control; /* Number of controlled ones */
64049 struct ip_vs_dest *dest; /* real server */
64050- atomic_t in_pkts; /* incoming packet counter */
64051+ atomic_unchecked_t in_pkts; /* incoming packet counter */
64052
64053 /* packet transmitter for different forwarding methods. If it
64054 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64055@@ -648,7 +648,7 @@ struct ip_vs_dest {
64056 __be16 port; /* port number of the server */
64057 union nf_inet_addr addr; /* IP address of the server */
64058 volatile unsigned flags; /* dest status flags */
64059- atomic_t conn_flags; /* flags to copy to conn */
64060+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
64061 atomic_t weight; /* server weight */
64062
64063 atomic_t refcnt; /* reference counter */
64064@@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
64065 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
64066
64067 if (!ct || !nf_ct_is_untracked(ct)) {
64068- nf_reset(skb);
64069+ nf_conntrack_put(skb->nfct);
64070 skb->nfct = &nf_ct_untracked_get()->ct_general;
64071 skb->nfctinfo = IP_CT_NEW;
64072 nf_conntrack_get(skb->nfct);
64073diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64074index 69b610a..fe3962c 100644
64075--- a/include/net/irda/ircomm_core.h
64076+++ b/include/net/irda/ircomm_core.h
64077@@ -51,7 +51,7 @@ typedef struct {
64078 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64079 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64080 struct ircomm_info *);
64081-} call_t;
64082+} __no_const call_t;
64083
64084 struct ircomm_cb {
64085 irda_queue_t queue;
64086diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64087index 59ba38bc..d515662 100644
64088--- a/include/net/irda/ircomm_tty.h
64089+++ b/include/net/irda/ircomm_tty.h
64090@@ -35,6 +35,7 @@
64091 #include <linux/termios.h>
64092 #include <linux/timer.h>
64093 #include <linux/tty.h> /* struct tty_struct */
64094+#include <asm/local.h>
64095
64096 #include <net/irda/irias_object.h>
64097 #include <net/irda/ircomm_core.h>
64098@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64099 unsigned short close_delay;
64100 unsigned short closing_wait; /* time to wait before closing */
64101
64102- int open_count;
64103- int blocked_open; /* # of blocked opens */
64104+ local_t open_count;
64105+ local_t blocked_open; /* # of blocked opens */
64106
64107 /* Protect concurent access to :
64108 * o self->open_count
64109diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64110index cc7c197..9f2da2a 100644
64111--- a/include/net/iucv/af_iucv.h
64112+++ b/include/net/iucv/af_iucv.h
64113@@ -141,7 +141,7 @@ struct iucv_sock {
64114 struct iucv_sock_list {
64115 struct hlist_head head;
64116 rwlock_t lock;
64117- atomic_t autobind_name;
64118+ atomic_unchecked_t autobind_name;
64119 };
64120
64121 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64122diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64123index 34c996f..bb3b4d4 100644
64124--- a/include/net/neighbour.h
64125+++ b/include/net/neighbour.h
64126@@ -123,7 +123,7 @@ struct neigh_ops {
64127 void (*error_report)(struct neighbour *, struct sk_buff *);
64128 int (*output)(struct neighbour *, struct sk_buff *);
64129 int (*connected_output)(struct neighbour *, struct sk_buff *);
64130-};
64131+} __do_const;
64132
64133 struct pneigh_entry {
64134 struct pneigh_entry *next;
64135diff --git a/include/net/netlink.h b/include/net/netlink.h
64136index f394fe5..fd073f9 100644
64137--- a/include/net/netlink.h
64138+++ b/include/net/netlink.h
64139@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64140 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64141 {
64142 if (mark)
64143- skb_trim(skb, (unsigned char *) mark - skb->data);
64144+ skb_trim(skb, (const unsigned char *) mark - skb->data);
64145 }
64146
64147 /**
64148diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64149index bbd023a..97c6d0d 100644
64150--- a/include/net/netns/ipv4.h
64151+++ b/include/net/netns/ipv4.h
64152@@ -57,8 +57,8 @@ struct netns_ipv4 {
64153 unsigned int sysctl_ping_group_range[2];
64154 long sysctl_tcp_mem[3];
64155
64156- atomic_t rt_genid;
64157- atomic_t dev_addr_genid;
64158+ atomic_unchecked_t rt_genid;
64159+ atomic_unchecked_t dev_addr_genid;
64160
64161 #ifdef CONFIG_IP_MROUTE
64162 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64163diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64164index a2ef814..31a8e3f 100644
64165--- a/include/net/sctp/sctp.h
64166+++ b/include/net/sctp/sctp.h
64167@@ -318,9 +318,9 @@ do { \
64168
64169 #else /* SCTP_DEBUG */
64170
64171-#define SCTP_DEBUG_PRINTK(whatever...)
64172-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64173-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64174+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64175+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64176+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64177 #define SCTP_ENABLE_DEBUG
64178 #define SCTP_DISABLE_DEBUG
64179 #define SCTP_ASSERT(expr, str, func)
64180diff --git a/include/net/sock.h b/include/net/sock.h
64181index 5a0a58a..2e3d4d0 100644
64182--- a/include/net/sock.h
64183+++ b/include/net/sock.h
64184@@ -302,7 +302,7 @@ struct sock {
64185 #ifdef CONFIG_RPS
64186 __u32 sk_rxhash;
64187 #endif
64188- atomic_t sk_drops;
64189+ atomic_unchecked_t sk_drops;
64190 int sk_rcvbuf;
64191
64192 struct sk_filter __rcu *sk_filter;
64193@@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64194 }
64195
64196 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64197- char __user *from, char *to,
64198+ char __user *from, unsigned char *to,
64199 int copy, int offset)
64200 {
64201 if (skb->ip_summed == CHECKSUM_NONE) {
64202diff --git a/include/net/tcp.h b/include/net/tcp.h
64203index f75a04d..702cf06 100644
64204--- a/include/net/tcp.h
64205+++ b/include/net/tcp.h
64206@@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
64207 char *name;
64208 sa_family_t family;
64209 const struct file_operations *seq_fops;
64210- struct seq_operations seq_ops;
64211+ seq_operations_no_const seq_ops;
64212 };
64213
64214 struct tcp_iter_state {
64215diff --git a/include/net/udp.h b/include/net/udp.h
64216index 5d606d9..e879f7b 100644
64217--- a/include/net/udp.h
64218+++ b/include/net/udp.h
64219@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
64220 sa_family_t family;
64221 struct udp_table *udp_table;
64222 const struct file_operations *seq_fops;
64223- struct seq_operations seq_ops;
64224+ seq_operations_no_const seq_ops;
64225 };
64226
64227 struct udp_iter_state {
64228diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64229index 96239e7..c85b032 100644
64230--- a/include/net/xfrm.h
64231+++ b/include/net/xfrm.h
64232@@ -505,7 +505,7 @@ struct xfrm_policy {
64233 struct timer_list timer;
64234
64235 struct flow_cache_object flo;
64236- atomic_t genid;
64237+ atomic_unchecked_t genid;
64238 u32 priority;
64239 u32 index;
64240 struct xfrm_mark mark;
64241diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64242index 1a046b1..ee0bef0 100644
64243--- a/include/rdma/iw_cm.h
64244+++ b/include/rdma/iw_cm.h
64245@@ -122,7 +122,7 @@ struct iw_cm_verbs {
64246 int backlog);
64247
64248 int (*destroy_listen)(struct iw_cm_id *cm_id);
64249-};
64250+} __no_const;
64251
64252 /**
64253 * iw_create_cm_id - Create an IW CM identifier.
64254diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64255index 8f9dfba..610ab6c 100644
64256--- a/include/scsi/libfc.h
64257+++ b/include/scsi/libfc.h
64258@@ -756,6 +756,7 @@ struct libfc_function_template {
64259 */
64260 void (*disc_stop_final) (struct fc_lport *);
64261 };
64262+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64263
64264 /**
64265 * struct fc_disc - Discovery context
64266@@ -861,7 +862,7 @@ struct fc_lport {
64267 struct fc_vport *vport;
64268
64269 /* Operational Information */
64270- struct libfc_function_template tt;
64271+ libfc_function_template_no_const tt;
64272 u8 link_up;
64273 u8 qfull;
64274 enum fc_lport_state state;
64275diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64276index ba96988..ecf2eb9 100644
64277--- a/include/scsi/scsi_device.h
64278+++ b/include/scsi/scsi_device.h
64279@@ -163,9 +163,9 @@ struct scsi_device {
64280 unsigned int max_device_blocked; /* what device_blocked counts down from */
64281 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64282
64283- atomic_t iorequest_cnt;
64284- atomic_t iodone_cnt;
64285- atomic_t ioerr_cnt;
64286+ atomic_unchecked_t iorequest_cnt;
64287+ atomic_unchecked_t iodone_cnt;
64288+ atomic_unchecked_t ioerr_cnt;
64289
64290 struct device sdev_gendev,
64291 sdev_dev;
64292diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64293index 719faf1..d1154d4 100644
64294--- a/include/scsi/scsi_transport_fc.h
64295+++ b/include/scsi/scsi_transport_fc.h
64296@@ -739,7 +739,7 @@ struct fc_function_template {
64297 unsigned long show_host_system_hostname:1;
64298
64299 unsigned long disable_target_scan:1;
64300-};
64301+} __do_const;
64302
64303
64304 /**
64305diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64306index 030b87c..98a6954 100644
64307--- a/include/sound/ak4xxx-adda.h
64308+++ b/include/sound/ak4xxx-adda.h
64309@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64310 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64311 unsigned char val);
64312 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64313-};
64314+} __no_const;
64315
64316 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64317
64318diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64319index 8c05e47..2b5df97 100644
64320--- a/include/sound/hwdep.h
64321+++ b/include/sound/hwdep.h
64322@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64323 struct snd_hwdep_dsp_status *status);
64324 int (*dsp_load)(struct snd_hwdep *hw,
64325 struct snd_hwdep_dsp_image *image);
64326-};
64327+} __no_const;
64328
64329 struct snd_hwdep {
64330 struct snd_card *card;
64331diff --git a/include/sound/info.h b/include/sound/info.h
64332index 9ca1a49..aba1728 100644
64333--- a/include/sound/info.h
64334+++ b/include/sound/info.h
64335@@ -44,7 +44,7 @@ struct snd_info_entry_text {
64336 struct snd_info_buffer *buffer);
64337 void (*write)(struct snd_info_entry *entry,
64338 struct snd_info_buffer *buffer);
64339-};
64340+} __no_const;
64341
64342 struct snd_info_entry_ops {
64343 int (*open)(struct snd_info_entry *entry,
64344diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64345index 0d11128..814178e 100644
64346--- a/include/sound/pcm.h
64347+++ b/include/sound/pcm.h
64348@@ -81,6 +81,7 @@ struct snd_pcm_ops {
64349 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64350 int (*ack)(struct snd_pcm_substream *substream);
64351 };
64352+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64353
64354 /*
64355 *
64356diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64357index af1b49e..a5d55a5 100644
64358--- a/include/sound/sb16_csp.h
64359+++ b/include/sound/sb16_csp.h
64360@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64361 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64362 int (*csp_stop) (struct snd_sb_csp * p);
64363 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64364-};
64365+} __no_const;
64366
64367 /*
64368 * CSP private data
64369diff --git a/include/sound/soc.h b/include/sound/soc.h
64370index 2ebf787..0276839 100644
64371--- a/include/sound/soc.h
64372+++ b/include/sound/soc.h
64373@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
64374 /* platform IO - used for platform DAPM */
64375 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64376 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64377-};
64378+} __do_const;
64379
64380 struct snd_soc_platform {
64381 const char *name;
64382@@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
64383 struct snd_soc_dai_link *dai_link;
64384 struct mutex pcm_mutex;
64385 enum snd_soc_pcm_subclass pcm_subclass;
64386- struct snd_pcm_ops ops;
64387+ snd_pcm_ops_no_const ops;
64388
64389 unsigned int complete:1;
64390 unsigned int dev_registered:1;
64391diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64392index 4119966..1a4671c 100644
64393--- a/include/sound/ymfpci.h
64394+++ b/include/sound/ymfpci.h
64395@@ -358,7 +358,7 @@ struct snd_ymfpci {
64396 spinlock_t reg_lock;
64397 spinlock_t voice_lock;
64398 wait_queue_head_t interrupt_sleep;
64399- atomic_t interrupt_sleep_count;
64400+ atomic_unchecked_t interrupt_sleep_count;
64401 struct snd_info_entry *proc_entry;
64402 const struct firmware *dsp_microcode;
64403 const struct firmware *controller_microcode;
64404diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64405index aaccc5f..092d568 100644
64406--- a/include/target/target_core_base.h
64407+++ b/include/target/target_core_base.h
64408@@ -447,7 +447,7 @@ struct t10_reservation_ops {
64409 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64410 int (*t10_pr_register)(struct se_cmd *);
64411 int (*t10_pr_clear)(struct se_cmd *);
64412-};
64413+} __no_const;
64414
64415 struct t10_reservation {
64416 /* Reservation effects all target ports */
64417@@ -576,7 +576,7 @@ struct se_cmd {
64418 atomic_t t_se_count;
64419 atomic_t t_task_cdbs_left;
64420 atomic_t t_task_cdbs_ex_left;
64421- atomic_t t_task_cdbs_sent;
64422+ atomic_unchecked_t t_task_cdbs_sent;
64423 unsigned int transport_state;
64424 #define CMD_T_ABORTED (1 << 0)
64425 #define CMD_T_ACTIVE (1 << 1)
64426@@ -802,7 +802,7 @@ struct se_device {
64427 spinlock_t stats_lock;
64428 /* Active commands on this virtual SE device */
64429 atomic_t simple_cmds;
64430- atomic_t dev_ordered_id;
64431+ atomic_unchecked_t dev_ordered_id;
64432 atomic_t execute_tasks;
64433 atomic_t dev_ordered_sync;
64434 atomic_t dev_qf_count;
64435diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
64436new file mode 100644
64437index 0000000..2efe49d
64438--- /dev/null
64439+++ b/include/trace/events/fs.h
64440@@ -0,0 +1,53 @@
64441+#undef TRACE_SYSTEM
64442+#define TRACE_SYSTEM fs
64443+
64444+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
64445+#define _TRACE_FS_H
64446+
64447+#include <linux/fs.h>
64448+#include <linux/tracepoint.h>
64449+
64450+TRACE_EVENT(do_sys_open,
64451+
64452+ TP_PROTO(char *filename, int flags, int mode),
64453+
64454+ TP_ARGS(filename, flags, mode),
64455+
64456+ TP_STRUCT__entry(
64457+ __string( filename, filename )
64458+ __field( int, flags )
64459+ __field( int, mode )
64460+ ),
64461+
64462+ TP_fast_assign(
64463+ __assign_str(filename, filename);
64464+ __entry->flags = flags;
64465+ __entry->mode = mode;
64466+ ),
64467+
64468+ TP_printk("\"%s\" %x %o",
64469+ __get_str(filename), __entry->flags, __entry->mode)
64470+);
64471+
64472+TRACE_EVENT(open_exec,
64473+
64474+ TP_PROTO(const char *filename),
64475+
64476+ TP_ARGS(filename),
64477+
64478+ TP_STRUCT__entry(
64479+ __string( filename, filename )
64480+ ),
64481+
64482+ TP_fast_assign(
64483+ __assign_str(filename, filename);
64484+ ),
64485+
64486+ TP_printk("\"%s\"",
64487+ __get_str(filename))
64488+);
64489+
64490+#endif /* _TRACE_FS_H */
64491+
64492+/* This part must be outside protection */
64493+#include <trace/define_trace.h>
64494diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64495index 1c09820..7f5ec79 100644
64496--- a/include/trace/events/irq.h
64497+++ b/include/trace/events/irq.h
64498@@ -36,7 +36,7 @@ struct softirq_action;
64499 */
64500 TRACE_EVENT(irq_handler_entry,
64501
64502- TP_PROTO(int irq, struct irqaction *action),
64503+ TP_PROTO(int irq, const struct irqaction *action),
64504
64505 TP_ARGS(irq, action),
64506
64507@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64508 */
64509 TRACE_EVENT(irq_handler_exit,
64510
64511- TP_PROTO(int irq, struct irqaction *action, int ret),
64512+ TP_PROTO(int irq, const struct irqaction *action, int ret),
64513
64514 TP_ARGS(irq, action, ret),
64515
64516diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64517index f9466fa..f4e2b81 100644
64518--- a/include/video/udlfb.h
64519+++ b/include/video/udlfb.h
64520@@ -53,10 +53,10 @@ struct dlfb_data {
64521 u32 pseudo_palette[256];
64522 int blank_mode; /*one of FB_BLANK_ */
64523 /* blit-only rendering path metrics, exposed through sysfs */
64524- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64525- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64526- atomic_t bytes_sent; /* to usb, after compression including overhead */
64527- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64528+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64529+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64530+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64531+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64532 };
64533
64534 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64535diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64536index 0993a22..32ba2fe 100644
64537--- a/include/video/uvesafb.h
64538+++ b/include/video/uvesafb.h
64539@@ -177,6 +177,7 @@ struct uvesafb_par {
64540 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64541 u8 pmi_setpal; /* PMI for palette changes */
64542 u16 *pmi_base; /* protected mode interface location */
64543+ u8 *pmi_code; /* protected mode code location */
64544 void *pmi_start;
64545 void *pmi_pal;
64546 u8 *vbe_state_orig; /*
64547diff --git a/init/Kconfig b/init/Kconfig
64548index 6cfd71d..16006e6 100644
64549--- a/init/Kconfig
64550+++ b/init/Kconfig
64551@@ -790,6 +790,7 @@ endif # CGROUPS
64552
64553 config CHECKPOINT_RESTORE
64554 bool "Checkpoint/restore support" if EXPERT
64555+ depends on !GRKERNSEC
64556 default n
64557 help
64558 Enables additional kernel features in a sake of checkpoint/restore.
64559@@ -1240,7 +1241,7 @@ config SLUB_DEBUG
64560
64561 config COMPAT_BRK
64562 bool "Disable heap randomization"
64563- default y
64564+ default n
64565 help
64566 Randomizing heap placement makes heap exploits harder, but it
64567 also breaks ancient binaries (including anything libc5 based).
64568@@ -1423,7 +1424,7 @@ config INIT_ALL_POSSIBLE
64569 config STOP_MACHINE
64570 bool
64571 default y
64572- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
64573+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
64574 help
64575 Need stop_machine() primitive.
64576
64577diff --git a/init/do_mounts.c b/init/do_mounts.c
64578index 42b0707..c06eef4 100644
64579--- a/init/do_mounts.c
64580+++ b/init/do_mounts.c
64581@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64582 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64583 {
64584 struct super_block *s;
64585- int err = sys_mount(name, "/root", fs, flags, data);
64586+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64587 if (err)
64588 return err;
64589
64590- sys_chdir((const char __user __force *)"/root");
64591+ sys_chdir((const char __force_user *)"/root");
64592 s = current->fs->pwd.dentry->d_sb;
64593 ROOT_DEV = s->s_dev;
64594 printk(KERN_INFO
64595@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64596 va_start(args, fmt);
64597 vsprintf(buf, fmt, args);
64598 va_end(args);
64599- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64600+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64601 if (fd >= 0) {
64602 sys_ioctl(fd, FDEJECT, 0);
64603 sys_close(fd);
64604 }
64605 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64606- fd = sys_open("/dev/console", O_RDWR, 0);
64607+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64608 if (fd >= 0) {
64609 sys_ioctl(fd, TCGETS, (long)&termios);
64610 termios.c_lflag &= ~ICANON;
64611 sys_ioctl(fd, TCSETSF, (long)&termios);
64612- sys_read(fd, &c, 1);
64613+ sys_read(fd, (char __user *)&c, 1);
64614 termios.c_lflag |= ICANON;
64615 sys_ioctl(fd, TCSETSF, (long)&termios);
64616 sys_close(fd);
64617@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64618 mount_root();
64619 out:
64620 devtmpfs_mount("dev");
64621- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64622- sys_chroot((const char __user __force *)".");
64623+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64624+ sys_chroot((const char __force_user *)".");
64625 }
64626diff --git a/init/do_mounts.h b/init/do_mounts.h
64627index f5b978a..69dbfe8 100644
64628--- a/init/do_mounts.h
64629+++ b/init/do_mounts.h
64630@@ -15,15 +15,15 @@ extern int root_mountflags;
64631
64632 static inline int create_dev(char *name, dev_t dev)
64633 {
64634- sys_unlink(name);
64635- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64636+ sys_unlink((char __force_user *)name);
64637+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64638 }
64639
64640 #if BITS_PER_LONG == 32
64641 static inline u32 bstat(char *name)
64642 {
64643 struct stat64 stat;
64644- if (sys_stat64(name, &stat) != 0)
64645+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64646 return 0;
64647 if (!S_ISBLK(stat.st_mode))
64648 return 0;
64649@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64650 static inline u32 bstat(char *name)
64651 {
64652 struct stat stat;
64653- if (sys_newstat(name, &stat) != 0)
64654+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64655 return 0;
64656 if (!S_ISBLK(stat.st_mode))
64657 return 0;
64658diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64659index 9047330..de0d1fb 100644
64660--- a/init/do_mounts_initrd.c
64661+++ b/init/do_mounts_initrd.c
64662@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64663 create_dev("/dev/root.old", Root_RAM0);
64664 /* mount initrd on rootfs' /root */
64665 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64666- sys_mkdir("/old", 0700);
64667- root_fd = sys_open("/", 0, 0);
64668- old_fd = sys_open("/old", 0, 0);
64669+ sys_mkdir((const char __force_user *)"/old", 0700);
64670+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64671+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64672 /* move initrd over / and chdir/chroot in initrd root */
64673- sys_chdir("/root");
64674- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64675- sys_chroot(".");
64676+ sys_chdir((const char __force_user *)"/root");
64677+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64678+ sys_chroot((const char __force_user *)".");
64679
64680 /*
64681 * In case that a resume from disk is carried out by linuxrc or one of
64682@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64683
64684 /* move initrd to rootfs' /old */
64685 sys_fchdir(old_fd);
64686- sys_mount("/", ".", NULL, MS_MOVE, NULL);
64687+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64688 /* switch root and cwd back to / of rootfs */
64689 sys_fchdir(root_fd);
64690- sys_chroot(".");
64691+ sys_chroot((const char __force_user *)".");
64692 sys_close(old_fd);
64693 sys_close(root_fd);
64694
64695 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64696- sys_chdir("/old");
64697+ sys_chdir((const char __force_user *)"/old");
64698 return;
64699 }
64700
64701@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64702 mount_root();
64703
64704 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64705- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64706+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64707 if (!error)
64708 printk("okay\n");
64709 else {
64710- int fd = sys_open("/dev/root.old", O_RDWR, 0);
64711+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64712 if (error == -ENOENT)
64713 printk("/initrd does not exist. Ignored.\n");
64714 else
64715 printk("failed\n");
64716 printk(KERN_NOTICE "Unmounting old root\n");
64717- sys_umount("/old", MNT_DETACH);
64718+ sys_umount((char __force_user *)"/old", MNT_DETACH);
64719 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64720 if (fd < 0) {
64721 error = fd;
64722@@ -115,11 +115,11 @@ int __init initrd_load(void)
64723 * mounted in the normal path.
64724 */
64725 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64726- sys_unlink("/initrd.image");
64727+ sys_unlink((const char __force_user *)"/initrd.image");
64728 handle_initrd();
64729 return 1;
64730 }
64731 }
64732- sys_unlink("/initrd.image");
64733+ sys_unlink((const char __force_user *)"/initrd.image");
64734 return 0;
64735 }
64736diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64737index 32c4799..c27ee74 100644
64738--- a/init/do_mounts_md.c
64739+++ b/init/do_mounts_md.c
64740@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64741 partitioned ? "_d" : "", minor,
64742 md_setup_args[ent].device_names);
64743
64744- fd = sys_open(name, 0, 0);
64745+ fd = sys_open((char __force_user *)name, 0, 0);
64746 if (fd < 0) {
64747 printk(KERN_ERR "md: open failed - cannot start "
64748 "array %s\n", name);
64749@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64750 * array without it
64751 */
64752 sys_close(fd);
64753- fd = sys_open(name, 0, 0);
64754+ fd = sys_open((char __force_user *)name, 0, 0);
64755 sys_ioctl(fd, BLKRRPART, 0);
64756 }
64757 sys_close(fd);
64758@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64759
64760 wait_for_device_probe();
64761
64762- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64763+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64764 if (fd >= 0) {
64765 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64766 sys_close(fd);
64767diff --git a/init/initramfs.c b/init/initramfs.c
64768index 8216c30..25e8e32 100644
64769--- a/init/initramfs.c
64770+++ b/init/initramfs.c
64771@@ -74,7 +74,7 @@ static void __init free_hash(void)
64772 }
64773 }
64774
64775-static long __init do_utime(char __user *filename, time_t mtime)
64776+static long __init do_utime(__force char __user *filename, time_t mtime)
64777 {
64778 struct timespec t[2];
64779
64780@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64781 struct dir_entry *de, *tmp;
64782 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64783 list_del(&de->list);
64784- do_utime(de->name, de->mtime);
64785+ do_utime((char __force_user *)de->name, de->mtime);
64786 kfree(de->name);
64787 kfree(de);
64788 }
64789@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64790 if (nlink >= 2) {
64791 char *old = find_link(major, minor, ino, mode, collected);
64792 if (old)
64793- return (sys_link(old, collected) < 0) ? -1 : 1;
64794+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64795 }
64796 return 0;
64797 }
64798@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64799 {
64800 struct stat st;
64801
64802- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64803+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64804 if (S_ISDIR(st.st_mode))
64805- sys_rmdir(path);
64806+ sys_rmdir((char __force_user *)path);
64807 else
64808- sys_unlink(path);
64809+ sys_unlink((char __force_user *)path);
64810 }
64811 }
64812
64813@@ -305,7 +305,7 @@ static int __init do_name(void)
64814 int openflags = O_WRONLY|O_CREAT;
64815 if (ml != 1)
64816 openflags |= O_TRUNC;
64817- wfd = sys_open(collected, openflags, mode);
64818+ wfd = sys_open((char __force_user *)collected, openflags, mode);
64819
64820 if (wfd >= 0) {
64821 sys_fchown(wfd, uid, gid);
64822@@ -317,17 +317,17 @@ static int __init do_name(void)
64823 }
64824 }
64825 } else if (S_ISDIR(mode)) {
64826- sys_mkdir(collected, mode);
64827- sys_chown(collected, uid, gid);
64828- sys_chmod(collected, mode);
64829+ sys_mkdir((char __force_user *)collected, mode);
64830+ sys_chown((char __force_user *)collected, uid, gid);
64831+ sys_chmod((char __force_user *)collected, mode);
64832 dir_add(collected, mtime);
64833 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64834 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64835 if (maybe_link() == 0) {
64836- sys_mknod(collected, mode, rdev);
64837- sys_chown(collected, uid, gid);
64838- sys_chmod(collected, mode);
64839- do_utime(collected, mtime);
64840+ sys_mknod((char __force_user *)collected, mode, rdev);
64841+ sys_chown((char __force_user *)collected, uid, gid);
64842+ sys_chmod((char __force_user *)collected, mode);
64843+ do_utime((char __force_user *)collected, mtime);
64844 }
64845 }
64846 return 0;
64847@@ -336,15 +336,15 @@ static int __init do_name(void)
64848 static int __init do_copy(void)
64849 {
64850 if (count >= body_len) {
64851- sys_write(wfd, victim, body_len);
64852+ sys_write(wfd, (char __force_user *)victim, body_len);
64853 sys_close(wfd);
64854- do_utime(vcollected, mtime);
64855+ do_utime((char __force_user *)vcollected, mtime);
64856 kfree(vcollected);
64857 eat(body_len);
64858 state = SkipIt;
64859 return 0;
64860 } else {
64861- sys_write(wfd, victim, count);
64862+ sys_write(wfd, (char __force_user *)victim, count);
64863 body_len -= count;
64864 eat(count);
64865 return 1;
64866@@ -355,9 +355,9 @@ static int __init do_symlink(void)
64867 {
64868 collected[N_ALIGN(name_len) + body_len] = '\0';
64869 clean_path(collected, 0);
64870- sys_symlink(collected + N_ALIGN(name_len), collected);
64871- sys_lchown(collected, uid, gid);
64872- do_utime(collected, mtime);
64873+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64874+ sys_lchown((char __force_user *)collected, uid, gid);
64875+ do_utime((char __force_user *)collected, mtime);
64876 state = SkipIt;
64877 next_state = Reset;
64878 return 0;
64879diff --git a/init/main.c b/init/main.c
64880index b08c5f7..bf65a52 100644
64881--- a/init/main.c
64882+++ b/init/main.c
64883@@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64884 extern void tc_init(void);
64885 #endif
64886
64887+extern void grsecurity_init(void);
64888+
64889 /*
64890 * Debug helper: via this flag we know that we are in 'early bootup code'
64891 * where only the boot processor is running with IRQ disabled. This means
64892@@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64893
64894 __setup("reset_devices", set_reset_devices);
64895
64896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64897+extern char pax_enter_kernel_user[];
64898+extern char pax_exit_kernel_user[];
64899+extern pgdval_t clone_pgd_mask;
64900+#endif
64901+
64902+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64903+static int __init setup_pax_nouderef(char *str)
64904+{
64905+#ifdef CONFIG_X86_32
64906+ unsigned int cpu;
64907+ struct desc_struct *gdt;
64908+
64909+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64910+ gdt = get_cpu_gdt_table(cpu);
64911+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64912+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64913+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64914+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64915+ }
64916+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64917+#else
64918+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64919+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64920+ clone_pgd_mask = ~(pgdval_t)0UL;
64921+#endif
64922+
64923+ return 0;
64924+}
64925+early_param("pax_nouderef", setup_pax_nouderef);
64926+#endif
64927+
64928+#ifdef CONFIG_PAX_SOFTMODE
64929+int pax_softmode;
64930+
64931+static int __init setup_pax_softmode(char *str)
64932+{
64933+ get_option(&str, &pax_softmode);
64934+ return 1;
64935+}
64936+__setup("pax_softmode=", setup_pax_softmode);
64937+#endif
64938+
64939 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64940 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64941 static const char *panic_later, *panic_param;
64942@@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64943 {
64944 int count = preempt_count();
64945 int ret;
64946+ const char *msg1 = "", *msg2 = "";
64947
64948 if (initcall_debug)
64949 ret = do_one_initcall_debug(fn);
64950@@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64951 sprintf(msgbuf, "error code %d ", ret);
64952
64953 if (preempt_count() != count) {
64954- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64955+ msg1 = " preemption imbalance";
64956 preempt_count() = count;
64957 }
64958 if (irqs_disabled()) {
64959- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64960+ msg2 = " disabled interrupts";
64961 local_irq_enable();
64962 }
64963- if (msgbuf[0]) {
64964- printk("initcall %pF returned with %s\n", fn, msgbuf);
64965+ if (msgbuf[0] || *msg1 || *msg2) {
64966+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64967 }
64968
64969 return ret;
64970@@ -747,8 +793,14 @@ static void __init do_initcall_level(int level)
64971 level, level,
64972 repair_env_string);
64973
64974- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
64975+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
64976 do_one_initcall(*fn);
64977+
64978+#ifdef CONFIG_PAX_LATENT_ENTROPY
64979+ transfer_latent_entropy();
64980+#endif
64981+
64982+ }
64983 }
64984
64985 static void __init do_initcalls(void)
64986@@ -782,8 +834,14 @@ static void __init do_pre_smp_initcalls(void)
64987 {
64988 initcall_t *fn;
64989
64990- for (fn = __initcall_start; fn < __initcall0_start; fn++)
64991+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
64992 do_one_initcall(*fn);
64993+
64994+#ifdef CONFIG_PAX_LATENT_ENTROPY
64995+ transfer_latent_entropy();
64996+#endif
64997+
64998+ }
64999 }
65000
65001 static void run_init_process(const char *init_filename)
65002@@ -865,7 +923,7 @@ static int __init kernel_init(void * unused)
65003 do_basic_setup();
65004
65005 /* Open the /dev/console on the rootfs, this should never fail */
65006- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65007+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65008 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65009
65010 (void) sys_dup(0);
65011@@ -878,11 +936,13 @@ static int __init kernel_init(void * unused)
65012 if (!ramdisk_execute_command)
65013 ramdisk_execute_command = "/init";
65014
65015- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65016+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65017 ramdisk_execute_command = NULL;
65018 prepare_namespace();
65019 }
65020
65021+ grsecurity_init();
65022+
65023 /*
65024 * Ok, we have completed the initial bootup, and
65025 * we're essentially up and running. Get rid of the
65026diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65027index 28bd64d..c66b72a 100644
65028--- a/ipc/mqueue.c
65029+++ b/ipc/mqueue.c
65030@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65031 mq_bytes = (mq_msg_tblsz +
65032 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65033
65034+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65035 spin_lock(&mq_lock);
65036 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65037 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
65038diff --git a/ipc/msg.c b/ipc/msg.c
65039index 7385de2..a8180e08 100644
65040--- a/ipc/msg.c
65041+++ b/ipc/msg.c
65042@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65043 return security_msg_queue_associate(msq, msgflg);
65044 }
65045
65046+static struct ipc_ops msg_ops = {
65047+ .getnew = newque,
65048+ .associate = msg_security,
65049+ .more_checks = NULL
65050+};
65051+
65052 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65053 {
65054 struct ipc_namespace *ns;
65055- struct ipc_ops msg_ops;
65056 struct ipc_params msg_params;
65057
65058 ns = current->nsproxy->ipc_ns;
65059
65060- msg_ops.getnew = newque;
65061- msg_ops.associate = msg_security;
65062- msg_ops.more_checks = NULL;
65063-
65064 msg_params.key = key;
65065 msg_params.flg = msgflg;
65066
65067diff --git a/ipc/sem.c b/ipc/sem.c
65068index 5215a81..cfc0cac 100644
65069--- a/ipc/sem.c
65070+++ b/ipc/sem.c
65071@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65072 return 0;
65073 }
65074
65075+static struct ipc_ops sem_ops = {
65076+ .getnew = newary,
65077+ .associate = sem_security,
65078+ .more_checks = sem_more_checks
65079+};
65080+
65081 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65082 {
65083 struct ipc_namespace *ns;
65084- struct ipc_ops sem_ops;
65085 struct ipc_params sem_params;
65086
65087 ns = current->nsproxy->ipc_ns;
65088@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65089 if (nsems < 0 || nsems > ns->sc_semmsl)
65090 return -EINVAL;
65091
65092- sem_ops.getnew = newary;
65093- sem_ops.associate = sem_security;
65094- sem_ops.more_checks = sem_more_checks;
65095-
65096 sem_params.key = key;
65097 sem_params.flg = semflg;
65098 sem_params.u.nsems = nsems;
65099diff --git a/ipc/shm.c b/ipc/shm.c
65100index 406c5b2..bc66d67 100644
65101--- a/ipc/shm.c
65102+++ b/ipc/shm.c
65103@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65104 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65105 #endif
65106
65107+#ifdef CONFIG_GRKERNSEC
65108+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65109+ const time_t shm_createtime, const uid_t cuid,
65110+ const int shmid);
65111+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65112+ const time_t shm_createtime);
65113+#endif
65114+
65115 void shm_init_ns(struct ipc_namespace *ns)
65116 {
65117 ns->shm_ctlmax = SHMMAX;
65118@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65119 shp->shm_lprid = 0;
65120 shp->shm_atim = shp->shm_dtim = 0;
65121 shp->shm_ctim = get_seconds();
65122+#ifdef CONFIG_GRKERNSEC
65123+ {
65124+ struct timespec timeval;
65125+ do_posix_clock_monotonic_gettime(&timeval);
65126+
65127+ shp->shm_createtime = timeval.tv_sec;
65128+ }
65129+#endif
65130 shp->shm_segsz = size;
65131 shp->shm_nattch = 0;
65132 shp->shm_file = file;
65133@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65134 return 0;
65135 }
65136
65137+static struct ipc_ops shm_ops = {
65138+ .getnew = newseg,
65139+ .associate = shm_security,
65140+ .more_checks = shm_more_checks
65141+};
65142+
65143 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65144 {
65145 struct ipc_namespace *ns;
65146- struct ipc_ops shm_ops;
65147 struct ipc_params shm_params;
65148
65149 ns = current->nsproxy->ipc_ns;
65150
65151- shm_ops.getnew = newseg;
65152- shm_ops.associate = shm_security;
65153- shm_ops.more_checks = shm_more_checks;
65154-
65155 shm_params.key = key;
65156 shm_params.flg = shmflg;
65157 shm_params.u.size = size;
65158@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65159 f_mode = FMODE_READ | FMODE_WRITE;
65160 }
65161 if (shmflg & SHM_EXEC) {
65162+
65163+#ifdef CONFIG_PAX_MPROTECT
65164+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
65165+ goto out;
65166+#endif
65167+
65168 prot |= PROT_EXEC;
65169 acc_mode |= S_IXUGO;
65170 }
65171@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65172 if (err)
65173 goto out_unlock;
65174
65175+#ifdef CONFIG_GRKERNSEC
65176+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65177+ shp->shm_perm.cuid, shmid) ||
65178+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65179+ err = -EACCES;
65180+ goto out_unlock;
65181+ }
65182+#endif
65183+
65184 path = shp->shm_file->f_path;
65185 path_get(&path);
65186 shp->shm_nattch++;
65187+#ifdef CONFIG_GRKERNSEC
65188+ shp->shm_lapid = current->pid;
65189+#endif
65190 size = i_size_read(path.dentry->d_inode);
65191 shm_unlock(shp);
65192
65193diff --git a/kernel/acct.c b/kernel/acct.c
65194index 02e6167..54824f7 100644
65195--- a/kernel/acct.c
65196+++ b/kernel/acct.c
65197@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65198 */
65199 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65200 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65201- file->f_op->write(file, (char *)&ac,
65202+ file->f_op->write(file, (char __force_user *)&ac,
65203 sizeof(acct_t), &file->f_pos);
65204 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65205 set_fs(fs);
65206diff --git a/kernel/audit.c b/kernel/audit.c
65207index 1c7f2c6..9ba5359 100644
65208--- a/kernel/audit.c
65209+++ b/kernel/audit.c
65210@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65211 3) suppressed due to audit_rate_limit
65212 4) suppressed due to audit_backlog_limit
65213 */
65214-static atomic_t audit_lost = ATOMIC_INIT(0);
65215+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65216
65217 /* The netlink socket. */
65218 static struct sock *audit_sock;
65219@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65220 unsigned long now;
65221 int print;
65222
65223- atomic_inc(&audit_lost);
65224+ atomic_inc_unchecked(&audit_lost);
65225
65226 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65227
65228@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65229 printk(KERN_WARNING
65230 "audit: audit_lost=%d audit_rate_limit=%d "
65231 "audit_backlog_limit=%d\n",
65232- atomic_read(&audit_lost),
65233+ atomic_read_unchecked(&audit_lost),
65234 audit_rate_limit,
65235 audit_backlog_limit);
65236 audit_panic(message);
65237@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65238 status_set.pid = audit_pid;
65239 status_set.rate_limit = audit_rate_limit;
65240 status_set.backlog_limit = audit_backlog_limit;
65241- status_set.lost = atomic_read(&audit_lost);
65242+ status_set.lost = atomic_read_unchecked(&audit_lost);
65243 status_set.backlog = skb_queue_len(&audit_skb_queue);
65244 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65245 &status_set, sizeof(status_set));
65246diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65247index af1de0f..06dfe57 100644
65248--- a/kernel/auditsc.c
65249+++ b/kernel/auditsc.c
65250@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65251 }
65252
65253 /* global counter which is incremented every time something logs in */
65254-static atomic_t session_id = ATOMIC_INIT(0);
65255+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65256
65257 /**
65258 * audit_set_loginuid - set current task's audit_context loginuid
65259@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65260 return -EPERM;
65261 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65262
65263- sessionid = atomic_inc_return(&session_id);
65264+ sessionid = atomic_inc_return_unchecked(&session_id);
65265 if (context && context->in_syscall) {
65266 struct audit_buffer *ab;
65267
65268diff --git a/kernel/capability.c b/kernel/capability.c
65269index 3f1adb6..c564db0 100644
65270--- a/kernel/capability.c
65271+++ b/kernel/capability.c
65272@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65273 * before modification is attempted and the application
65274 * fails.
65275 */
65276+ if (tocopy > ARRAY_SIZE(kdata))
65277+ return -EFAULT;
65278+
65279 if (copy_to_user(dataptr, kdata, tocopy
65280 * sizeof(struct __user_cap_data_struct))) {
65281 return -EFAULT;
65282@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65283 int ret;
65284
65285 rcu_read_lock();
65286- ret = security_capable(__task_cred(t), ns, cap);
65287+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65288+ gr_task_is_capable(t, __task_cred(t), cap);
65289 rcu_read_unlock();
65290
65291- return (ret == 0);
65292+ return ret;
65293 }
65294
65295 /**
65296@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65297 int ret;
65298
65299 rcu_read_lock();
65300- ret = security_capable_noaudit(__task_cred(t), ns, cap);
65301+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65302 rcu_read_unlock();
65303
65304- return (ret == 0);
65305+ return ret;
65306 }
65307
65308 /**
65309@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65310 BUG();
65311 }
65312
65313- if (security_capable(current_cred(), ns, cap) == 0) {
65314+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65315 current->flags |= PF_SUPERPRIV;
65316 return true;
65317 }
65318@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65319 }
65320 EXPORT_SYMBOL(ns_capable);
65321
65322+bool ns_capable_nolog(struct user_namespace *ns, int cap)
65323+{
65324+ if (unlikely(!cap_valid(cap))) {
65325+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65326+ BUG();
65327+ }
65328+
65329+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65330+ current->flags |= PF_SUPERPRIV;
65331+ return true;
65332+ }
65333+ return false;
65334+}
65335+EXPORT_SYMBOL(ns_capable_nolog);
65336+
65337 /**
65338 * capable - Determine if the current task has a superior capability in effect
65339 * @cap: The capability to be tested for
65340@@ -408,6 +427,12 @@ bool capable(int cap)
65341 }
65342 EXPORT_SYMBOL(capable);
65343
65344+bool capable_nolog(int cap)
65345+{
65346+ return ns_capable_nolog(&init_user_ns, cap);
65347+}
65348+EXPORT_SYMBOL(capable_nolog);
65349+
65350 /**
65351 * nsown_capable - Check superior capability to one's own user_ns
65352 * @cap: The capability in question
65353diff --git a/kernel/compat.c b/kernel/compat.c
65354index d2c67aa..a629b2e 100644
65355--- a/kernel/compat.c
65356+++ b/kernel/compat.c
65357@@ -13,6 +13,7 @@
65358
65359 #include <linux/linkage.h>
65360 #include <linux/compat.h>
65361+#include <linux/module.h>
65362 #include <linux/errno.h>
65363 #include <linux/time.h>
65364 #include <linux/signal.h>
65365@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65366 mm_segment_t oldfs;
65367 long ret;
65368
65369- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65370+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65371 oldfs = get_fs();
65372 set_fs(KERNEL_DS);
65373 ret = hrtimer_nanosleep_restart(restart);
65374@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65375 oldfs = get_fs();
65376 set_fs(KERNEL_DS);
65377 ret = hrtimer_nanosleep(&tu,
65378- rmtp ? (struct timespec __user *)&rmt : NULL,
65379+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
65380 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65381 set_fs(oldfs);
65382
65383@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65384 mm_segment_t old_fs = get_fs();
65385
65386 set_fs(KERNEL_DS);
65387- ret = sys_sigpending((old_sigset_t __user *) &s);
65388+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
65389 set_fs(old_fs);
65390 if (ret == 0)
65391 ret = put_user(s, set);
65392@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65393 mm_segment_t old_fs = get_fs();
65394
65395 set_fs(KERNEL_DS);
65396- ret = sys_old_getrlimit(resource, &r);
65397+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65398 set_fs(old_fs);
65399
65400 if (!ret) {
65401@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65402 mm_segment_t old_fs = get_fs();
65403
65404 set_fs(KERNEL_DS);
65405- ret = sys_getrusage(who, (struct rusage __user *) &r);
65406+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65407 set_fs(old_fs);
65408
65409 if (ret)
65410@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65411 set_fs (KERNEL_DS);
65412 ret = sys_wait4(pid,
65413 (stat_addr ?
65414- (unsigned int __user *) &status : NULL),
65415- options, (struct rusage __user *) &r);
65416+ (unsigned int __force_user *) &status : NULL),
65417+ options, (struct rusage __force_user *) &r);
65418 set_fs (old_fs);
65419
65420 if (ret > 0) {
65421@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65422 memset(&info, 0, sizeof(info));
65423
65424 set_fs(KERNEL_DS);
65425- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65426- uru ? (struct rusage __user *)&ru : NULL);
65427+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65428+ uru ? (struct rusage __force_user *)&ru : NULL);
65429 set_fs(old_fs);
65430
65431 if ((ret < 0) || (info.si_signo == 0))
65432@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65433 oldfs = get_fs();
65434 set_fs(KERNEL_DS);
65435 err = sys_timer_settime(timer_id, flags,
65436- (struct itimerspec __user *) &newts,
65437- (struct itimerspec __user *) &oldts);
65438+ (struct itimerspec __force_user *) &newts,
65439+ (struct itimerspec __force_user *) &oldts);
65440 set_fs(oldfs);
65441 if (!err && old && put_compat_itimerspec(old, &oldts))
65442 return -EFAULT;
65443@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65444 oldfs = get_fs();
65445 set_fs(KERNEL_DS);
65446 err = sys_timer_gettime(timer_id,
65447- (struct itimerspec __user *) &ts);
65448+ (struct itimerspec __force_user *) &ts);
65449 set_fs(oldfs);
65450 if (!err && put_compat_itimerspec(setting, &ts))
65451 return -EFAULT;
65452@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65453 oldfs = get_fs();
65454 set_fs(KERNEL_DS);
65455 err = sys_clock_settime(which_clock,
65456- (struct timespec __user *) &ts);
65457+ (struct timespec __force_user *) &ts);
65458 set_fs(oldfs);
65459 return err;
65460 }
65461@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65462 oldfs = get_fs();
65463 set_fs(KERNEL_DS);
65464 err = sys_clock_gettime(which_clock,
65465- (struct timespec __user *) &ts);
65466+ (struct timespec __force_user *) &ts);
65467 set_fs(oldfs);
65468 if (!err && put_compat_timespec(&ts, tp))
65469 return -EFAULT;
65470@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65471
65472 oldfs = get_fs();
65473 set_fs(KERNEL_DS);
65474- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65475+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65476 set_fs(oldfs);
65477
65478 err = compat_put_timex(utp, &txc);
65479@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65480 oldfs = get_fs();
65481 set_fs(KERNEL_DS);
65482 err = sys_clock_getres(which_clock,
65483- (struct timespec __user *) &ts);
65484+ (struct timespec __force_user *) &ts);
65485 set_fs(oldfs);
65486 if (!err && tp && put_compat_timespec(&ts, tp))
65487 return -EFAULT;
65488@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65489 long err;
65490 mm_segment_t oldfs;
65491 struct timespec tu;
65492- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65493+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65494
65495- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65496+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65497 oldfs = get_fs();
65498 set_fs(KERNEL_DS);
65499 err = clock_nanosleep_restart(restart);
65500@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65501 oldfs = get_fs();
65502 set_fs(KERNEL_DS);
65503 err = sys_clock_nanosleep(which_clock, flags,
65504- (struct timespec __user *) &in,
65505- (struct timespec __user *) &out);
65506+ (struct timespec __force_user *) &in,
65507+ (struct timespec __force_user *) &out);
65508 set_fs(oldfs);
65509
65510 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65511diff --git a/kernel/configs.c b/kernel/configs.c
65512index 42e8fa0..9e7406b 100644
65513--- a/kernel/configs.c
65514+++ b/kernel/configs.c
65515@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65516 struct proc_dir_entry *entry;
65517
65518 /* create the current config file */
65519+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65520+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65521+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65522+ &ikconfig_file_ops);
65523+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65524+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65525+ &ikconfig_file_ops);
65526+#endif
65527+#else
65528 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65529 &ikconfig_file_ops);
65530+#endif
65531+
65532 if (!entry)
65533 return -ENOMEM;
65534
65535diff --git a/kernel/cred.c b/kernel/cred.c
65536index e70683d..27761b6 100644
65537--- a/kernel/cred.c
65538+++ b/kernel/cred.c
65539@@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
65540 validate_creds(cred);
65541 put_cred(cred);
65542 }
65543+
65544+#ifdef CONFIG_GRKERNSEC_SETXID
65545+ cred = (struct cred *) tsk->delayed_cred;
65546+ if (cred) {
65547+ tsk->delayed_cred = NULL;
65548+ validate_creds(cred);
65549+ put_cred(cred);
65550+ }
65551+#endif
65552 }
65553
65554 /**
65555@@ -473,7 +482,7 @@ error_put:
65556 * Always returns 0 thus allowing this function to be tail-called at the end
65557 * of, say, sys_setgid().
65558 */
65559-int commit_creds(struct cred *new)
65560+static int __commit_creds(struct cred *new)
65561 {
65562 struct task_struct *task = current;
65563 const struct cred *old = task->real_cred;
65564@@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
65565
65566 get_cred(new); /* we will require a ref for the subj creds too */
65567
65568+ gr_set_role_label(task, new->uid, new->gid);
65569+
65570 /* dumpability changes */
65571 if (old->euid != new->euid ||
65572 old->egid != new->egid ||
65573@@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
65574 put_cred(old);
65575 return 0;
65576 }
65577+#ifdef CONFIG_GRKERNSEC_SETXID
65578+extern int set_user(struct cred *new);
65579+
65580+void gr_delayed_cred_worker(void)
65581+{
65582+ const struct cred *new = current->delayed_cred;
65583+ struct cred *ncred;
65584+
65585+ current->delayed_cred = NULL;
65586+
65587+ if (current_uid() && new != NULL) {
65588+ // from doing get_cred on it when queueing this
65589+ put_cred(new);
65590+ return;
65591+ } else if (new == NULL)
65592+ return;
65593+
65594+ ncred = prepare_creds();
65595+ if (!ncred)
65596+ goto die;
65597+ // uids
65598+ ncred->uid = new->uid;
65599+ ncred->euid = new->euid;
65600+ ncred->suid = new->suid;
65601+ ncred->fsuid = new->fsuid;
65602+ // gids
65603+ ncred->gid = new->gid;
65604+ ncred->egid = new->egid;
65605+ ncred->sgid = new->sgid;
65606+ ncred->fsgid = new->fsgid;
65607+ // groups
65608+ if (set_groups(ncred, new->group_info) < 0) {
65609+ abort_creds(ncred);
65610+ goto die;
65611+ }
65612+ // caps
65613+ ncred->securebits = new->securebits;
65614+ ncred->cap_inheritable = new->cap_inheritable;
65615+ ncred->cap_permitted = new->cap_permitted;
65616+ ncred->cap_effective = new->cap_effective;
65617+ ncred->cap_bset = new->cap_bset;
65618+
65619+ if (set_user(ncred)) {
65620+ abort_creds(ncred);
65621+ goto die;
65622+ }
65623+
65624+ // from doing get_cred on it when queueing this
65625+ put_cred(new);
65626+
65627+ __commit_creds(ncred);
65628+ return;
65629+die:
65630+ // from doing get_cred on it when queueing this
65631+ put_cred(new);
65632+ do_group_exit(SIGKILL);
65633+}
65634+#endif
65635+
65636+int commit_creds(struct cred *new)
65637+{
65638+#ifdef CONFIG_GRKERNSEC_SETXID
65639+ int ret;
65640+ int schedule_it = 0;
65641+ struct task_struct *t;
65642+
65643+ /* we won't get called with tasklist_lock held for writing
65644+ and interrupts disabled as the cred struct in that case is
65645+ init_cred
65646+ */
65647+ if (grsec_enable_setxid && !current_is_single_threaded() &&
65648+ !current_uid() && new->uid) {
65649+ schedule_it = 1;
65650+ }
65651+ ret = __commit_creds(new);
65652+ if (schedule_it) {
65653+ rcu_read_lock();
65654+ read_lock(&tasklist_lock);
65655+ for (t = next_thread(current); t != current;
65656+ t = next_thread(t)) {
65657+ if (t->delayed_cred == NULL) {
65658+ t->delayed_cred = get_cred(new);
65659+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65660+ set_tsk_need_resched(t);
65661+ }
65662+ }
65663+ read_unlock(&tasklist_lock);
65664+ rcu_read_unlock();
65665+ }
65666+ return ret;
65667+#else
65668+ return __commit_creds(new);
65669+#endif
65670+}
65671+
65672 EXPORT_SYMBOL(commit_creds);
65673
65674 /**
65675diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65676index 0557f24..1a00d9a 100644
65677--- a/kernel/debug/debug_core.c
65678+++ b/kernel/debug/debug_core.c
65679@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65680 */
65681 static atomic_t masters_in_kgdb;
65682 static atomic_t slaves_in_kgdb;
65683-static atomic_t kgdb_break_tasklet_var;
65684+static atomic_unchecked_t kgdb_break_tasklet_var;
65685 atomic_t kgdb_setting_breakpoint;
65686
65687 struct task_struct *kgdb_usethread;
65688@@ -132,7 +132,7 @@ int kgdb_single_step;
65689 static pid_t kgdb_sstep_pid;
65690
65691 /* to keep track of the CPU which is doing the single stepping*/
65692-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65693+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65694
65695 /*
65696 * If you are debugging a problem where roundup (the collection of
65697@@ -540,7 +540,7 @@ return_normal:
65698 * kernel will only try for the value of sstep_tries before
65699 * giving up and continuing on.
65700 */
65701- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65702+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65703 (kgdb_info[cpu].task &&
65704 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65705 atomic_set(&kgdb_active, -1);
65706@@ -634,8 +634,8 @@ cpu_master_loop:
65707 }
65708
65709 kgdb_restore:
65710- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65711- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65712+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65713+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65714 if (kgdb_info[sstep_cpu].task)
65715 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65716 else
65717@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65718 static void kgdb_tasklet_bpt(unsigned long ing)
65719 {
65720 kgdb_breakpoint();
65721- atomic_set(&kgdb_break_tasklet_var, 0);
65722+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65723 }
65724
65725 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65726
65727 void kgdb_schedule_breakpoint(void)
65728 {
65729- if (atomic_read(&kgdb_break_tasklet_var) ||
65730+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65731 atomic_read(&kgdb_active) != -1 ||
65732 atomic_read(&kgdb_setting_breakpoint))
65733 return;
65734- atomic_inc(&kgdb_break_tasklet_var);
65735+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65736 tasklet_schedule(&kgdb_tasklet_breakpoint);
65737 }
65738 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65739diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65740index 67b847d..93834dd 100644
65741--- a/kernel/debug/kdb/kdb_main.c
65742+++ b/kernel/debug/kdb/kdb_main.c
65743@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65744 list_for_each_entry(mod, kdb_modules, list) {
65745
65746 kdb_printf("%-20s%8u 0x%p ", mod->name,
65747- mod->core_size, (void *)mod);
65748+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65749 #ifdef CONFIG_MODULE_UNLOAD
65750 kdb_printf("%4ld ", module_refcount(mod));
65751 #endif
65752@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65753 kdb_printf(" (Loading)");
65754 else
65755 kdb_printf(" (Live)");
65756- kdb_printf(" 0x%p", mod->module_core);
65757+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65758
65759 #ifdef CONFIG_MODULE_UNLOAD
65760 {
65761diff --git a/kernel/events/core.c b/kernel/events/core.c
65762index fd126f8..70b755b 100644
65763--- a/kernel/events/core.c
65764+++ b/kernel/events/core.c
65765@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65766 return 0;
65767 }
65768
65769-static atomic64_t perf_event_id;
65770+static atomic64_unchecked_t perf_event_id;
65771
65772 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65773 enum event_type_t event_type);
65774@@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65775
65776 static inline u64 perf_event_count(struct perf_event *event)
65777 {
65778- return local64_read(&event->count) + atomic64_read(&event->child_count);
65779+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65780 }
65781
65782 static u64 perf_event_read(struct perf_event *event)
65783@@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65784 mutex_lock(&event->child_mutex);
65785 total += perf_event_read(event);
65786 *enabled += event->total_time_enabled +
65787- atomic64_read(&event->child_total_time_enabled);
65788+ atomic64_read_unchecked(&event->child_total_time_enabled);
65789 *running += event->total_time_running +
65790- atomic64_read(&event->child_total_time_running);
65791+ atomic64_read_unchecked(&event->child_total_time_running);
65792
65793 list_for_each_entry(child, &event->child_list, child_list) {
65794 total += perf_event_read(child);
65795@@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65796 userpg->offset -= local64_read(&event->hw.prev_count);
65797
65798 userpg->time_enabled = enabled +
65799- atomic64_read(&event->child_total_time_enabled);
65800+ atomic64_read_unchecked(&event->child_total_time_enabled);
65801
65802 userpg->time_running = running +
65803- atomic64_read(&event->child_total_time_running);
65804+ atomic64_read_unchecked(&event->child_total_time_running);
65805
65806 arch_perf_update_userpage(userpg, now);
65807
65808@@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65809 values[n++] = perf_event_count(event);
65810 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65811 values[n++] = enabled +
65812- atomic64_read(&event->child_total_time_enabled);
65813+ atomic64_read_unchecked(&event->child_total_time_enabled);
65814 }
65815 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65816 values[n++] = running +
65817- atomic64_read(&event->child_total_time_running);
65818+ atomic64_read_unchecked(&event->child_total_time_running);
65819 }
65820 if (read_format & PERF_FORMAT_ID)
65821 values[n++] = primary_event_id(event);
65822@@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65823 * need to add enough zero bytes after the string to handle
65824 * the 64bit alignment we do later.
65825 */
65826- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65827+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65828 if (!buf) {
65829 name = strncpy(tmp, "//enomem", sizeof(tmp));
65830 goto got_name;
65831 }
65832- name = d_path(&file->f_path, buf, PATH_MAX);
65833+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65834 if (IS_ERR(name)) {
65835 name = strncpy(tmp, "//toolong", sizeof(tmp));
65836 goto got_name;
65837@@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65838 event->parent = parent_event;
65839
65840 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65841- event->id = atomic64_inc_return(&perf_event_id);
65842+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65843
65844 event->state = PERF_EVENT_STATE_INACTIVE;
65845
65846@@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65847 /*
65848 * Add back the child's count to the parent's count:
65849 */
65850- atomic64_add(child_val, &parent_event->child_count);
65851- atomic64_add(child_event->total_time_enabled,
65852+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65853+ atomic64_add_unchecked(child_event->total_time_enabled,
65854 &parent_event->child_total_time_enabled);
65855- atomic64_add(child_event->total_time_running,
65856+ atomic64_add_unchecked(child_event->total_time_running,
65857 &parent_event->child_total_time_running);
65858
65859 /*
65860diff --git a/kernel/exit.c b/kernel/exit.c
65861index 9d81012..d7911f1 100644
65862--- a/kernel/exit.c
65863+++ b/kernel/exit.c
65864@@ -59,6 +59,10 @@
65865 #include <asm/pgtable.h>
65866 #include <asm/mmu_context.h>
65867
65868+#ifdef CONFIG_GRKERNSEC
65869+extern rwlock_t grsec_exec_file_lock;
65870+#endif
65871+
65872 static void exit_mm(struct task_struct * tsk);
65873
65874 static void __unhash_process(struct task_struct *p, bool group_dead)
65875@@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65876 struct task_struct *leader;
65877 int zap_leader;
65878 repeat:
65879+#ifdef CONFIG_NET
65880+ gr_del_task_from_ip_table(p);
65881+#endif
65882+
65883 /* don't need to get the RCU readlock here - the process is dead and
65884 * can't be modifying its own credentials. But shut RCU-lockdep up */
65885 rcu_read_lock();
65886@@ -382,7 +390,7 @@ int allow_signal(int sig)
65887 * know it'll be handled, so that they don't get converted to
65888 * SIGKILL or just silently dropped.
65889 */
65890- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65891+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65892 recalc_sigpending();
65893 spin_unlock_irq(&current->sighand->siglock);
65894 return 0;
65895@@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65896 vsnprintf(current->comm, sizeof(current->comm), name, args);
65897 va_end(args);
65898
65899+#ifdef CONFIG_GRKERNSEC
65900+ write_lock(&grsec_exec_file_lock);
65901+ if (current->exec_file) {
65902+ fput(current->exec_file);
65903+ current->exec_file = NULL;
65904+ }
65905+ write_unlock(&grsec_exec_file_lock);
65906+#endif
65907+
65908+ gr_set_kernel_label(current);
65909+
65910 /*
65911 * If we were started as result of loading a module, close all of the
65912 * user space pages. We don't need them, and if we didn't close them
65913@@ -901,6 +920,8 @@ void do_exit(long code)
65914 struct task_struct *tsk = current;
65915 int group_dead;
65916
65917+ set_fs(USER_DS);
65918+
65919 profile_task_exit(tsk);
65920
65921 WARN_ON(blk_needs_flush_plug(tsk));
65922@@ -917,7 +938,6 @@ void do_exit(long code)
65923 * mm_release()->clear_child_tid() from writing to a user-controlled
65924 * kernel address.
65925 */
65926- set_fs(USER_DS);
65927
65928 ptrace_event(PTRACE_EVENT_EXIT, code);
65929
65930@@ -978,6 +998,9 @@ void do_exit(long code)
65931 tsk->exit_code = code;
65932 taskstats_exit(tsk, group_dead);
65933
65934+ gr_acl_handle_psacct(tsk, code);
65935+ gr_acl_handle_exit();
65936+
65937 exit_mm(tsk);
65938
65939 if (group_dead)
65940@@ -1094,7 +1117,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65941 * Take down every thread in the group. This is called by fatal signals
65942 * as well as by sys_exit_group (below).
65943 */
65944-void
65945+__noreturn void
65946 do_group_exit(int exit_code)
65947 {
65948 struct signal_struct *sig = current->signal;
65949diff --git a/kernel/fork.c b/kernel/fork.c
65950index 8163333..aee97f3 100644
65951--- a/kernel/fork.c
65952+++ b/kernel/fork.c
65953@@ -274,19 +274,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65954 }
65955
65956 err = arch_dup_task_struct(tsk, orig);
65957- if (err)
65958- goto out;
65959
65960+ /*
65961+ * We defer looking at err, because we will need this setup
65962+ * for the clean up path to work correctly.
65963+ */
65964 tsk->stack = ti;
65965-
65966 setup_thread_stack(tsk, orig);
65967+
65968+ if (err)
65969+ goto out;
65970+
65971 clear_user_return_notifier(tsk);
65972 clear_tsk_need_resched(tsk);
65973 stackend = end_of_stack(tsk);
65974 *stackend = STACK_END_MAGIC; /* for overflow detection */
65975
65976 #ifdef CONFIG_CC_STACKPROTECTOR
65977- tsk->stack_canary = get_random_int();
65978+ tsk->stack_canary = pax_get_random_long();
65979 #endif
65980
65981 /*
65982@@ -310,13 +315,78 @@ out:
65983 }
65984
65985 #ifdef CONFIG_MMU
65986+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65987+{
65988+ struct vm_area_struct *tmp;
65989+ unsigned long charge;
65990+ struct mempolicy *pol;
65991+ struct file *file;
65992+
65993+ charge = 0;
65994+ if (mpnt->vm_flags & VM_ACCOUNT) {
65995+ unsigned long len;
65996+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65997+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65998+ goto fail_nomem;
65999+ charge = len;
66000+ }
66001+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66002+ if (!tmp)
66003+ goto fail_nomem;
66004+ *tmp = *mpnt;
66005+ tmp->vm_mm = mm;
66006+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66007+ pol = mpol_dup(vma_policy(mpnt));
66008+ if (IS_ERR(pol))
66009+ goto fail_nomem_policy;
66010+ vma_set_policy(tmp, pol);
66011+ if (anon_vma_fork(tmp, mpnt))
66012+ goto fail_nomem_anon_vma_fork;
66013+ tmp->vm_flags &= ~VM_LOCKED;
66014+ tmp->vm_next = tmp->vm_prev = NULL;
66015+ tmp->vm_mirror = NULL;
66016+ file = tmp->vm_file;
66017+ if (file) {
66018+ struct inode *inode = file->f_path.dentry->d_inode;
66019+ struct address_space *mapping = file->f_mapping;
66020+
66021+ get_file(file);
66022+ if (tmp->vm_flags & VM_DENYWRITE)
66023+ atomic_dec(&inode->i_writecount);
66024+ mutex_lock(&mapping->i_mmap_mutex);
66025+ if (tmp->vm_flags & VM_SHARED)
66026+ mapping->i_mmap_writable++;
66027+ flush_dcache_mmap_lock(mapping);
66028+ /* insert tmp into the share list, just after mpnt */
66029+ vma_prio_tree_add(tmp, mpnt);
66030+ flush_dcache_mmap_unlock(mapping);
66031+ mutex_unlock(&mapping->i_mmap_mutex);
66032+ }
66033+
66034+ /*
66035+ * Clear hugetlb-related page reserves for children. This only
66036+ * affects MAP_PRIVATE mappings. Faults generated by the child
66037+ * are not guaranteed to succeed, even if read-only
66038+ */
66039+ if (is_vm_hugetlb_page(tmp))
66040+ reset_vma_resv_huge_pages(tmp);
66041+
66042+ return tmp;
66043+
66044+fail_nomem_anon_vma_fork:
66045+ mpol_put(pol);
66046+fail_nomem_policy:
66047+ kmem_cache_free(vm_area_cachep, tmp);
66048+fail_nomem:
66049+ vm_unacct_memory(charge);
66050+ return NULL;
66051+}
66052+
66053 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66054 {
66055 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66056 struct rb_node **rb_link, *rb_parent;
66057 int retval;
66058- unsigned long charge;
66059- struct mempolicy *pol;
66060
66061 down_write(&oldmm->mmap_sem);
66062 flush_cache_dup_mm(oldmm);
66063@@ -328,8 +398,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66064 mm->locked_vm = 0;
66065 mm->mmap = NULL;
66066 mm->mmap_cache = NULL;
66067- mm->free_area_cache = oldmm->mmap_base;
66068- mm->cached_hole_size = ~0UL;
66069+ mm->free_area_cache = oldmm->free_area_cache;
66070+ mm->cached_hole_size = oldmm->cached_hole_size;
66071 mm->map_count = 0;
66072 cpumask_clear(mm_cpumask(mm));
66073 mm->mm_rb = RB_ROOT;
66074@@ -345,8 +415,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66075
66076 prev = NULL;
66077 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66078- struct file *file;
66079-
66080 if (mpnt->vm_flags & VM_DONTCOPY) {
66081 long pages = vma_pages(mpnt);
66082 mm->total_vm -= pages;
66083@@ -354,54 +422,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66084 -pages);
66085 continue;
66086 }
66087- charge = 0;
66088- if (mpnt->vm_flags & VM_ACCOUNT) {
66089- unsigned long len;
66090- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66091- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
66092- goto fail_nomem;
66093- charge = len;
66094+ tmp = dup_vma(mm, oldmm, mpnt);
66095+ if (!tmp) {
66096+ retval = -ENOMEM;
66097+ goto out;
66098 }
66099- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66100- if (!tmp)
66101- goto fail_nomem;
66102- *tmp = *mpnt;
66103- INIT_LIST_HEAD(&tmp->anon_vma_chain);
66104- pol = mpol_dup(vma_policy(mpnt));
66105- retval = PTR_ERR(pol);
66106- if (IS_ERR(pol))
66107- goto fail_nomem_policy;
66108- vma_set_policy(tmp, pol);
66109- tmp->vm_mm = mm;
66110- if (anon_vma_fork(tmp, mpnt))
66111- goto fail_nomem_anon_vma_fork;
66112- tmp->vm_flags &= ~VM_LOCKED;
66113- tmp->vm_next = tmp->vm_prev = NULL;
66114- file = tmp->vm_file;
66115- if (file) {
66116- struct inode *inode = file->f_path.dentry->d_inode;
66117- struct address_space *mapping = file->f_mapping;
66118-
66119- get_file(file);
66120- if (tmp->vm_flags & VM_DENYWRITE)
66121- atomic_dec(&inode->i_writecount);
66122- mutex_lock(&mapping->i_mmap_mutex);
66123- if (tmp->vm_flags & VM_SHARED)
66124- mapping->i_mmap_writable++;
66125- flush_dcache_mmap_lock(mapping);
66126- /* insert tmp into the share list, just after mpnt */
66127- vma_prio_tree_add(tmp, mpnt);
66128- flush_dcache_mmap_unlock(mapping);
66129- mutex_unlock(&mapping->i_mmap_mutex);
66130- }
66131-
66132- /*
66133- * Clear hugetlb-related page reserves for children. This only
66134- * affects MAP_PRIVATE mappings. Faults generated by the child
66135- * are not guaranteed to succeed, even if read-only
66136- */
66137- if (is_vm_hugetlb_page(tmp))
66138- reset_vma_resv_huge_pages(tmp);
66139
66140 /*
66141 * Link in the new vma and copy the page table entries.
66142@@ -424,6 +449,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66143 if (retval)
66144 goto out;
66145 }
66146+
66147+#ifdef CONFIG_PAX_SEGMEXEC
66148+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66149+ struct vm_area_struct *mpnt_m;
66150+
66151+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66152+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66153+
66154+ if (!mpnt->vm_mirror)
66155+ continue;
66156+
66157+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66158+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66159+ mpnt->vm_mirror = mpnt_m;
66160+ } else {
66161+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66162+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66163+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66164+ mpnt->vm_mirror->vm_mirror = mpnt;
66165+ }
66166+ }
66167+ BUG_ON(mpnt_m);
66168+ }
66169+#endif
66170+
66171 /* a new mm has just been created */
66172 arch_dup_mmap(oldmm, mm);
66173 retval = 0;
66174@@ -432,14 +482,6 @@ out:
66175 flush_tlb_mm(oldmm);
66176 up_write(&oldmm->mmap_sem);
66177 return retval;
66178-fail_nomem_anon_vma_fork:
66179- mpol_put(pol);
66180-fail_nomem_policy:
66181- kmem_cache_free(vm_area_cachep, tmp);
66182-fail_nomem:
66183- retval = -ENOMEM;
66184- vm_unacct_memory(charge);
66185- goto out;
66186 }
66187
66188 static inline int mm_alloc_pgd(struct mm_struct *mm)
66189@@ -676,8 +718,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66190 return ERR_PTR(err);
66191
66192 mm = get_task_mm(task);
66193- if (mm && mm != current->mm &&
66194- !ptrace_may_access(task, mode)) {
66195+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66196+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66197 mmput(mm);
66198 mm = ERR_PTR(-EACCES);
66199 }
66200@@ -899,13 +941,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66201 spin_unlock(&fs->lock);
66202 return -EAGAIN;
66203 }
66204- fs->users++;
66205+ atomic_inc(&fs->users);
66206 spin_unlock(&fs->lock);
66207 return 0;
66208 }
66209 tsk->fs = copy_fs_struct(fs);
66210 if (!tsk->fs)
66211 return -ENOMEM;
66212+ gr_set_chroot_entries(tsk, &tsk->fs->root);
66213 return 0;
66214 }
66215
66216@@ -1172,6 +1215,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66217 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66218 #endif
66219 retval = -EAGAIN;
66220+
66221+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66222+
66223 if (atomic_read(&p->real_cred->user->processes) >=
66224 task_rlimit(p, RLIMIT_NPROC)) {
66225 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66226@@ -1392,6 +1438,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66227 /* Need tasklist lock for parent etc handling! */
66228 write_lock_irq(&tasklist_lock);
66229
66230+ /* synchronizes with gr_set_acls() */
66231+ gr_copy_label(p);
66232+
66233 /* CLONE_PARENT re-uses the old parent */
66234 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
66235 p->real_parent = current->real_parent;
66236@@ -1502,6 +1551,8 @@ bad_fork_cleanup_count:
66237 bad_fork_free:
66238 free_task(p);
66239 fork_out:
66240+ gr_log_forkfail(retval);
66241+
66242 return ERR_PTR(retval);
66243 }
66244
66245@@ -1602,6 +1653,8 @@ long do_fork(unsigned long clone_flags,
66246 if (clone_flags & CLONE_PARENT_SETTID)
66247 put_user(nr, parent_tidptr);
66248
66249+ gr_handle_brute_check();
66250+
66251 if (clone_flags & CLONE_VFORK) {
66252 p->vfork_done = &vfork;
66253 init_completion(&vfork);
66254@@ -1700,7 +1753,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66255 return 0;
66256
66257 /* don't need lock here; in the worst case we'll do useless copy */
66258- if (fs->users == 1)
66259+ if (atomic_read(&fs->users) == 1)
66260 return 0;
66261
66262 *new_fsp = copy_fs_struct(fs);
66263@@ -1789,7 +1842,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66264 fs = current->fs;
66265 spin_lock(&fs->lock);
66266 current->fs = new_fs;
66267- if (--fs->users)
66268+ gr_set_chroot_entries(current, &current->fs->root);
66269+ if (atomic_dec_return(&fs->users))
66270 new_fs = NULL;
66271 else
66272 new_fs = fs;
66273diff --git a/kernel/futex.c b/kernel/futex.c
66274index e2b0fb9..db818ac 100644
66275--- a/kernel/futex.c
66276+++ b/kernel/futex.c
66277@@ -54,6 +54,7 @@
66278 #include <linux/mount.h>
66279 #include <linux/pagemap.h>
66280 #include <linux/syscalls.h>
66281+#include <linux/ptrace.h>
66282 #include <linux/signal.h>
66283 #include <linux/export.h>
66284 #include <linux/magic.h>
66285@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66286 struct page *page, *page_head;
66287 int err, ro = 0;
66288
66289+#ifdef CONFIG_PAX_SEGMEXEC
66290+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66291+ return -EFAULT;
66292+#endif
66293+
66294 /*
66295 * The futex address must be "naturally" aligned.
66296 */
66297@@ -2711,6 +2717,7 @@ static int __init futex_init(void)
66298 {
66299 u32 curval;
66300 int i;
66301+ mm_segment_t oldfs;
66302
66303 /*
66304 * This will fail and we want it. Some arch implementations do
66305@@ -2722,8 +2729,11 @@ static int __init futex_init(void)
66306 * implementation, the non-functional ones will return
66307 * -ENOSYS.
66308 */
66309+ oldfs = get_fs();
66310+ set_fs(USER_DS);
66311 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66312 futex_cmpxchg_enabled = 1;
66313+ set_fs(oldfs);
66314
66315 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66316 plist_head_init(&futex_queues[i].chain);
66317diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66318index 9b22d03..6295b62 100644
66319--- a/kernel/gcov/base.c
66320+++ b/kernel/gcov/base.c
66321@@ -102,11 +102,6 @@ void gcov_enable_events(void)
66322 }
66323
66324 #ifdef CONFIG_MODULES
66325-static inline int within(void *addr, void *start, unsigned long size)
66326-{
66327- return ((addr >= start) && (addr < start + size));
66328-}
66329-
66330 /* Update list and generate events when modules are unloaded. */
66331 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66332 void *data)
66333@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66334 prev = NULL;
66335 /* Remove entries located in module from linked list. */
66336 for (info = gcov_info_head; info; info = info->next) {
66337- if (within(info, mod->module_core, mod->core_size)) {
66338+ if (within_module_core_rw((unsigned long)info, mod)) {
66339 if (prev)
66340 prev->next = info->next;
66341 else
66342diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66343index 6db7a5e..25b6648 100644
66344--- a/kernel/hrtimer.c
66345+++ b/kernel/hrtimer.c
66346@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
66347 local_irq_restore(flags);
66348 }
66349
66350-static void run_hrtimer_softirq(struct softirq_action *h)
66351+static void run_hrtimer_softirq(void)
66352 {
66353 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
66354
66355diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66356index 4304919..408c4c0 100644
66357--- a/kernel/jump_label.c
66358+++ b/kernel/jump_label.c
66359@@ -13,6 +13,7 @@
66360 #include <linux/sort.h>
66361 #include <linux/err.h>
66362 #include <linux/static_key.h>
66363+#include <linux/mm.h>
66364
66365 #ifdef HAVE_JUMP_LABEL
66366
66367@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66368
66369 size = (((unsigned long)stop - (unsigned long)start)
66370 / sizeof(struct jump_entry));
66371+ pax_open_kernel();
66372 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66373+ pax_close_kernel();
66374 }
66375
66376 static void jump_label_update(struct static_key *key, int enable);
66377@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66378 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66379 struct jump_entry *iter;
66380
66381+ pax_open_kernel();
66382 for (iter = iter_start; iter < iter_stop; iter++) {
66383 if (within_module_init(iter->code, mod))
66384 iter->code = 0;
66385 }
66386+ pax_close_kernel();
66387 }
66388
66389 static int
66390diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66391index 079f1d3..4e80e69 100644
66392--- a/kernel/kallsyms.c
66393+++ b/kernel/kallsyms.c
66394@@ -11,6 +11,9 @@
66395 * Changed the compression method from stem compression to "table lookup"
66396 * compression (see scripts/kallsyms.c for a more complete description)
66397 */
66398+#ifdef CONFIG_GRKERNSEC_HIDESYM
66399+#define __INCLUDED_BY_HIDESYM 1
66400+#endif
66401 #include <linux/kallsyms.h>
66402 #include <linux/module.h>
66403 #include <linux/init.h>
66404@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66405
66406 static inline int is_kernel_inittext(unsigned long addr)
66407 {
66408+ if (system_state != SYSTEM_BOOTING)
66409+ return 0;
66410+
66411 if (addr >= (unsigned long)_sinittext
66412 && addr <= (unsigned long)_einittext)
66413 return 1;
66414 return 0;
66415 }
66416
66417+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66418+#ifdef CONFIG_MODULES
66419+static inline int is_module_text(unsigned long addr)
66420+{
66421+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66422+ return 1;
66423+
66424+ addr = ktla_ktva(addr);
66425+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66426+}
66427+#else
66428+static inline int is_module_text(unsigned long addr)
66429+{
66430+ return 0;
66431+}
66432+#endif
66433+#endif
66434+
66435 static inline int is_kernel_text(unsigned long addr)
66436 {
66437 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66438@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66439
66440 static inline int is_kernel(unsigned long addr)
66441 {
66442+
66443+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66444+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
66445+ return 1;
66446+
66447+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66448+#else
66449 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66450+#endif
66451+
66452 return 1;
66453 return in_gate_area_no_mm(addr);
66454 }
66455
66456 static int is_ksym_addr(unsigned long addr)
66457 {
66458+
66459+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66460+ if (is_module_text(addr))
66461+ return 0;
66462+#endif
66463+
66464 if (all_var)
66465 return is_kernel(addr);
66466
66467@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66468
66469 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66470 {
66471- iter->name[0] = '\0';
66472 iter->nameoff = get_symbol_offset(new_pos);
66473 iter->pos = new_pos;
66474 }
66475@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66476 {
66477 struct kallsym_iter *iter = m->private;
66478
66479+#ifdef CONFIG_GRKERNSEC_HIDESYM
66480+ if (current_uid())
66481+ return 0;
66482+#endif
66483+
66484 /* Some debugging symbols have no name. Ignore them. */
66485 if (!iter->name[0])
66486 return 0;
66487@@ -515,11 +558,22 @@ static int s_show(struct seq_file *m, void *p)
66488 */
66489 type = iter->exported ? toupper(iter->type) :
66490 tolower(iter->type);
66491+
66492+#ifdef CONFIG_GRKERNSEC_HIDESYM
66493+ seq_printf(m, "%pP %c %s\t[%s]\n", (void *)iter->value,
66494+ type, iter->name, iter->module_name);
66495+#else
66496 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
66497 type, iter->name, iter->module_name);
66498+#endif
66499 } else
66500+#ifdef CONFIG_GRKERNSEC_HIDESYM
66501+ seq_printf(m, "%pP %c %s\n", (void *)iter->value,
66502+ iter->type, iter->name);
66503+#else
66504 seq_printf(m, "%pK %c %s\n", (void *)iter->value,
66505 iter->type, iter->name);
66506+#endif
66507 return 0;
66508 }
66509
66510@@ -540,7 +594,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66511 struct kallsym_iter *iter;
66512 int ret;
66513
66514- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66515+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66516 if (!iter)
66517 return -ENOMEM;
66518 reset_iter(iter, 0);
66519diff --git a/kernel/kexec.c b/kernel/kexec.c
66520index 4e2e472..cd0c7ae 100644
66521--- a/kernel/kexec.c
66522+++ b/kernel/kexec.c
66523@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66524 unsigned long flags)
66525 {
66526 struct compat_kexec_segment in;
66527- struct kexec_segment out, __user *ksegments;
66528+ struct kexec_segment out;
66529+ struct kexec_segment __user *ksegments;
66530 unsigned long i, result;
66531
66532 /* Don't allow clients that don't understand the native
66533diff --git a/kernel/kmod.c b/kernel/kmod.c
66534index 05698a7..a4c1e3a 100644
66535--- a/kernel/kmod.c
66536+++ b/kernel/kmod.c
66537@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
66538 kfree(info->argv);
66539 }
66540
66541-static int call_modprobe(char *module_name, int wait)
66542+static int call_modprobe(char *module_name, char *module_param, int wait)
66543 {
66544 static char *envp[] = {
66545 "HOME=/",
66546@@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
66547 NULL
66548 };
66549
66550- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
66551+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
66552 if (!argv)
66553 goto out;
66554
66555@@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
66556 argv[1] = "-q";
66557 argv[2] = "--";
66558 argv[3] = module_name; /* check free_modprobe_argv() */
66559- argv[4] = NULL;
66560+ argv[4] = module_param;
66561+ argv[5] = NULL;
66562
66563 return call_usermodehelper_fns(modprobe_path, argv, envp,
66564 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
66565@@ -112,9 +113,8 @@ out:
66566 * If module auto-loading support is disabled then this function
66567 * becomes a no-operation.
66568 */
66569-int __request_module(bool wait, const char *fmt, ...)
66570+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66571 {
66572- va_list args;
66573 char module_name[MODULE_NAME_LEN];
66574 unsigned int max_modprobes;
66575 int ret;
66576@@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
66577 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66578 static int kmod_loop_msg;
66579
66580- va_start(args, fmt);
66581- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66582- va_end(args);
66583+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66584 if (ret >= MODULE_NAME_LEN)
66585 return -ENAMETOOLONG;
66586
66587@@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
66588 if (ret)
66589 return ret;
66590
66591+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66592+ if (!current_uid()) {
66593+ /* hack to workaround consolekit/udisks stupidity */
66594+ read_lock(&tasklist_lock);
66595+ if (!strcmp(current->comm, "mount") &&
66596+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66597+ read_unlock(&tasklist_lock);
66598+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66599+ return -EPERM;
66600+ }
66601+ read_unlock(&tasklist_lock);
66602+ }
66603+#endif
66604+
66605 /* If modprobe needs a service that is in a module, we get a recursive
66606 * loop. Limit the number of running kmod threads to max_threads/2 or
66607 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66608@@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
66609
66610 trace_module_request(module_name, wait, _RET_IP_);
66611
66612- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66613+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66614
66615 atomic_dec(&kmod_concurrent);
66616 return ret;
66617 }
66618+
66619+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66620+{
66621+ va_list args;
66622+ int ret;
66623+
66624+ va_start(args, fmt);
66625+ ret = ____request_module(wait, module_param, fmt, args);
66626+ va_end(args);
66627+
66628+ return ret;
66629+}
66630+
66631+int __request_module(bool wait, const char *fmt, ...)
66632+{
66633+ va_list args;
66634+ int ret;
66635+
66636+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66637+ if (current_uid()) {
66638+ char module_param[MODULE_NAME_LEN];
66639+
66640+ memset(module_param, 0, sizeof(module_param));
66641+
66642+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66643+
66644+ va_start(args, fmt);
66645+ ret = ____request_module(wait, module_param, fmt, args);
66646+ va_end(args);
66647+
66648+ return ret;
66649+ }
66650+#endif
66651+
66652+ va_start(args, fmt);
66653+ ret = ____request_module(wait, NULL, fmt, args);
66654+ va_end(args);
66655+
66656+ return ret;
66657+}
66658+
66659 EXPORT_SYMBOL(__request_module);
66660 #endif /* CONFIG_MODULES */
66661
66662@@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
66663 *
66664 * Thus the __user pointer cast is valid here.
66665 */
66666- sys_wait4(pid, (int __user *)&ret, 0, NULL);
66667+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66668
66669 /*
66670 * If ret is 0, either ____call_usermodehelper failed and the
66671diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66672index c62b854..cb67968 100644
66673--- a/kernel/kprobes.c
66674+++ b/kernel/kprobes.c
66675@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66676 * kernel image and loaded module images reside. This is required
66677 * so x86_64 can correctly handle the %rip-relative fixups.
66678 */
66679- kip->insns = module_alloc(PAGE_SIZE);
66680+ kip->insns = module_alloc_exec(PAGE_SIZE);
66681 if (!kip->insns) {
66682 kfree(kip);
66683 return NULL;
66684@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66685 */
66686 if (!list_is_singular(&kip->list)) {
66687 list_del(&kip->list);
66688- module_free(NULL, kip->insns);
66689+ module_free_exec(NULL, kip->insns);
66690 kfree(kip);
66691 }
66692 return 1;
66693@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66694 {
66695 int i, err = 0;
66696 unsigned long offset = 0, size = 0;
66697- char *modname, namebuf[128];
66698+ char *modname, namebuf[KSYM_NAME_LEN];
66699 const char *symbol_name;
66700 void *addr;
66701 struct kprobe_blackpoint *kb;
66702@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66703 const char *sym = NULL;
66704 unsigned int i = *(loff_t *) v;
66705 unsigned long offset = 0;
66706- char *modname, namebuf[128];
66707+ char *modname, namebuf[KSYM_NAME_LEN];
66708
66709 head = &kprobe_table[i];
66710 preempt_disable();
66711diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66712index 4e316e1..5501eef 100644
66713--- a/kernel/ksysfs.c
66714+++ b/kernel/ksysfs.c
66715@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66716 {
66717 if (count+1 > UEVENT_HELPER_PATH_LEN)
66718 return -ENOENT;
66719+ if (!capable(CAP_SYS_ADMIN))
66720+ return -EPERM;
66721 memcpy(uevent_helper, buf, count);
66722 uevent_helper[count] = '\0';
66723 if (count && uevent_helper[count-1] == '\n')
66724diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66725index ea9ee45..67ebc8f 100644
66726--- a/kernel/lockdep.c
66727+++ b/kernel/lockdep.c
66728@@ -590,6 +590,10 @@ static int static_obj(void *obj)
66729 end = (unsigned long) &_end,
66730 addr = (unsigned long) obj;
66731
66732+#ifdef CONFIG_PAX_KERNEXEC
66733+ start = ktla_ktva(start);
66734+#endif
66735+
66736 /*
66737 * static variable?
66738 */
66739@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66740 if (!static_obj(lock->key)) {
66741 debug_locks_off();
66742 printk("INFO: trying to register non-static key.\n");
66743+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66744 printk("the code is fine but needs lockdep annotation.\n");
66745 printk("turning off the locking correctness validator.\n");
66746 dump_stack();
66747@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66748 if (!class)
66749 return 0;
66750 }
66751- atomic_inc((atomic_t *)&class->ops);
66752+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66753 if (very_verbose(class)) {
66754 printk("\nacquire class [%p] %s", class->key, class->name);
66755 if (class->name_version > 1)
66756diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66757index 91c32a0..b2c71c5 100644
66758--- a/kernel/lockdep_proc.c
66759+++ b/kernel/lockdep_proc.c
66760@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66761
66762 static void print_name(struct seq_file *m, struct lock_class *class)
66763 {
66764- char str[128];
66765+ char str[KSYM_NAME_LEN];
66766 const char *name = class->name;
66767
66768 if (!name) {
66769diff --git a/kernel/module.c b/kernel/module.c
66770index 78ac6ec..e87db0e 100644
66771--- a/kernel/module.c
66772+++ b/kernel/module.c
66773@@ -58,6 +58,7 @@
66774 #include <linux/jump_label.h>
66775 #include <linux/pfn.h>
66776 #include <linux/bsearch.h>
66777+#include <linux/grsecurity.h>
66778
66779 #define CREATE_TRACE_POINTS
66780 #include <trace/events/module.h>
66781@@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66782
66783 /* Bounds of module allocation, for speeding __module_address.
66784 * Protected by module_mutex. */
66785-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66786+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66787+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66788
66789 int register_module_notifier(struct notifier_block * nb)
66790 {
66791@@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66792 return true;
66793
66794 list_for_each_entry_rcu(mod, &modules, list) {
66795- struct symsearch arr[] = {
66796+ struct symsearch modarr[] = {
66797 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66798 NOT_GPL_ONLY, false },
66799 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66800@@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66801 #endif
66802 };
66803
66804- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66805+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66806 return true;
66807 }
66808 return false;
66809@@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66810 static int percpu_modalloc(struct module *mod,
66811 unsigned long size, unsigned long align)
66812 {
66813- if (align > PAGE_SIZE) {
66814+ if (align-1 >= PAGE_SIZE) {
66815 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66816 mod->name, align, PAGE_SIZE);
66817 align = PAGE_SIZE;
66818@@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66819 static ssize_t show_coresize(struct module_attribute *mattr,
66820 struct module_kobject *mk, char *buffer)
66821 {
66822- return sprintf(buffer, "%u\n", mk->mod->core_size);
66823+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66824 }
66825
66826 static struct module_attribute modinfo_coresize =
66827@@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66828 static ssize_t show_initsize(struct module_attribute *mattr,
66829 struct module_kobject *mk, char *buffer)
66830 {
66831- return sprintf(buffer, "%u\n", mk->mod->init_size);
66832+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66833 }
66834
66835 static struct module_attribute modinfo_initsize =
66836@@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66837 */
66838 #ifdef CONFIG_SYSFS
66839
66840-#ifdef CONFIG_KALLSYMS
66841+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66842 static inline bool sect_empty(const Elf_Shdr *sect)
66843 {
66844 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66845@@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66846
66847 static void unset_module_core_ro_nx(struct module *mod)
66848 {
66849- set_page_attributes(mod->module_core + mod->core_text_size,
66850- mod->module_core + mod->core_size,
66851+ set_page_attributes(mod->module_core_rw,
66852+ mod->module_core_rw + mod->core_size_rw,
66853 set_memory_x);
66854- set_page_attributes(mod->module_core,
66855- mod->module_core + mod->core_ro_size,
66856+ set_page_attributes(mod->module_core_rx,
66857+ mod->module_core_rx + mod->core_size_rx,
66858 set_memory_rw);
66859 }
66860
66861 static void unset_module_init_ro_nx(struct module *mod)
66862 {
66863- set_page_attributes(mod->module_init + mod->init_text_size,
66864- mod->module_init + mod->init_size,
66865+ set_page_attributes(mod->module_init_rw,
66866+ mod->module_init_rw + mod->init_size_rw,
66867 set_memory_x);
66868- set_page_attributes(mod->module_init,
66869- mod->module_init + mod->init_ro_size,
66870+ set_page_attributes(mod->module_init_rx,
66871+ mod->module_init_rx + mod->init_size_rx,
66872 set_memory_rw);
66873 }
66874
66875@@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66876
66877 mutex_lock(&module_mutex);
66878 list_for_each_entry_rcu(mod, &modules, list) {
66879- if ((mod->module_core) && (mod->core_text_size)) {
66880- set_page_attributes(mod->module_core,
66881- mod->module_core + mod->core_text_size,
66882+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66883+ set_page_attributes(mod->module_core_rx,
66884+ mod->module_core_rx + mod->core_size_rx,
66885 set_memory_rw);
66886 }
66887- if ((mod->module_init) && (mod->init_text_size)) {
66888- set_page_attributes(mod->module_init,
66889- mod->module_init + mod->init_text_size,
66890+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66891+ set_page_attributes(mod->module_init_rx,
66892+ mod->module_init_rx + mod->init_size_rx,
66893 set_memory_rw);
66894 }
66895 }
66896@@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66897
66898 mutex_lock(&module_mutex);
66899 list_for_each_entry_rcu(mod, &modules, list) {
66900- if ((mod->module_core) && (mod->core_text_size)) {
66901- set_page_attributes(mod->module_core,
66902- mod->module_core + mod->core_text_size,
66903+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66904+ set_page_attributes(mod->module_core_rx,
66905+ mod->module_core_rx + mod->core_size_rx,
66906 set_memory_ro);
66907 }
66908- if ((mod->module_init) && (mod->init_text_size)) {
66909- set_page_attributes(mod->module_init,
66910- mod->module_init + mod->init_text_size,
66911+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66912+ set_page_attributes(mod->module_init_rx,
66913+ mod->module_init_rx + mod->init_size_rx,
66914 set_memory_ro);
66915 }
66916 }
66917@@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66918
66919 /* This may be NULL, but that's OK */
66920 unset_module_init_ro_nx(mod);
66921- module_free(mod, mod->module_init);
66922+ module_free(mod, mod->module_init_rw);
66923+ module_free_exec(mod, mod->module_init_rx);
66924 kfree(mod->args);
66925 percpu_modfree(mod);
66926
66927 /* Free lock-classes: */
66928- lockdep_free_key_range(mod->module_core, mod->core_size);
66929+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66930+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66931
66932 /* Finally, free the core (containing the module structure) */
66933 unset_module_core_ro_nx(mod);
66934- module_free(mod, mod->module_core);
66935+ module_free_exec(mod, mod->module_core_rx);
66936+ module_free(mod, mod->module_core_rw);
66937
66938 #ifdef CONFIG_MPU
66939 update_protections(current->mm);
66940@@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66941 int ret = 0;
66942 const struct kernel_symbol *ksym;
66943
66944+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66945+ int is_fs_load = 0;
66946+ int register_filesystem_found = 0;
66947+ char *p;
66948+
66949+ p = strstr(mod->args, "grsec_modharden_fs");
66950+ if (p) {
66951+ char *endptr = p + strlen("grsec_modharden_fs");
66952+ /* copy \0 as well */
66953+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66954+ is_fs_load = 1;
66955+ }
66956+#endif
66957+
66958 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66959 const char *name = info->strtab + sym[i].st_name;
66960
66961+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66962+ /* it's a real shame this will never get ripped and copied
66963+ upstream! ;(
66964+ */
66965+ if (is_fs_load && !strcmp(name, "register_filesystem"))
66966+ register_filesystem_found = 1;
66967+#endif
66968+
66969 switch (sym[i].st_shndx) {
66970 case SHN_COMMON:
66971 /* We compiled with -fno-common. These are not
66972@@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66973 ksym = resolve_symbol_wait(mod, info, name);
66974 /* Ok if resolved. */
66975 if (ksym && !IS_ERR(ksym)) {
66976+ pax_open_kernel();
66977 sym[i].st_value = ksym->value;
66978+ pax_close_kernel();
66979 break;
66980 }
66981
66982@@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66983 secbase = (unsigned long)mod_percpu(mod);
66984 else
66985 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66986+ pax_open_kernel();
66987 sym[i].st_value += secbase;
66988+ pax_close_kernel();
66989 break;
66990 }
66991 }
66992
66993+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66994+ if (is_fs_load && !register_filesystem_found) {
66995+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66996+ ret = -EPERM;
66997+ }
66998+#endif
66999+
67000 return ret;
67001 }
67002
67003@@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67004 || s->sh_entsize != ~0UL
67005 || strstarts(sname, ".init"))
67006 continue;
67007- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67008+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67009+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67010+ else
67011+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67012 pr_debug("\t%s\n", sname);
67013 }
67014- switch (m) {
67015- case 0: /* executable */
67016- mod->core_size = debug_align(mod->core_size);
67017- mod->core_text_size = mod->core_size;
67018- break;
67019- case 1: /* RO: text and ro-data */
67020- mod->core_size = debug_align(mod->core_size);
67021- mod->core_ro_size = mod->core_size;
67022- break;
67023- case 3: /* whole core */
67024- mod->core_size = debug_align(mod->core_size);
67025- break;
67026- }
67027 }
67028
67029 pr_debug("Init section allocation order:\n");
67030@@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67031 || s->sh_entsize != ~0UL
67032 || !strstarts(sname, ".init"))
67033 continue;
67034- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67035- | INIT_OFFSET_MASK);
67036+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67037+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67038+ else
67039+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67040+ s->sh_entsize |= INIT_OFFSET_MASK;
67041 pr_debug("\t%s\n", sname);
67042 }
67043- switch (m) {
67044- case 0: /* executable */
67045- mod->init_size = debug_align(mod->init_size);
67046- mod->init_text_size = mod->init_size;
67047- break;
67048- case 1: /* RO: text and ro-data */
67049- mod->init_size = debug_align(mod->init_size);
67050- mod->init_ro_size = mod->init_size;
67051- break;
67052- case 3: /* whole init */
67053- mod->init_size = debug_align(mod->init_size);
67054- break;
67055- }
67056 }
67057 }
67058
67059@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67060
67061 /* Put symbol section at end of init part of module. */
67062 symsect->sh_flags |= SHF_ALLOC;
67063- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67064+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67065 info->index.sym) | INIT_OFFSET_MASK;
67066 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67067
67068@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67069 }
67070
67071 /* Append room for core symbols at end of core part. */
67072- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67073- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67074- mod->core_size += strtab_size;
67075+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67076+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67077+ mod->core_size_rx += strtab_size;
67078
67079 /* Put string table section at end of init part of module. */
67080 strsect->sh_flags |= SHF_ALLOC;
67081- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67082+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67083 info->index.str) | INIT_OFFSET_MASK;
67084 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67085 }
67086@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67087 /* Make sure we get permanent strtab: don't use info->strtab. */
67088 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67089
67090+ pax_open_kernel();
67091+
67092 /* Set types up while we still have access to sections. */
67093 for (i = 0; i < mod->num_symtab; i++)
67094 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67095
67096- mod->core_symtab = dst = mod->module_core + info->symoffs;
67097- mod->core_strtab = s = mod->module_core + info->stroffs;
67098+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67099+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67100 src = mod->symtab;
67101 *dst = *src;
67102 *s++ = 0;
67103@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67104 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
67105 }
67106 mod->core_num_syms = ndst;
67107+
67108+ pax_close_kernel();
67109 }
67110 #else
67111 static inline void layout_symtab(struct module *mod, struct load_info *info)
67112@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
67113 return size == 0 ? NULL : vmalloc_exec(size);
67114 }
67115
67116-static void *module_alloc_update_bounds(unsigned long size)
67117+static void *module_alloc_update_bounds_rw(unsigned long size)
67118 {
67119 void *ret = module_alloc(size);
67120
67121 if (ret) {
67122 mutex_lock(&module_mutex);
67123 /* Update module bounds. */
67124- if ((unsigned long)ret < module_addr_min)
67125- module_addr_min = (unsigned long)ret;
67126- if ((unsigned long)ret + size > module_addr_max)
67127- module_addr_max = (unsigned long)ret + size;
67128+ if ((unsigned long)ret < module_addr_min_rw)
67129+ module_addr_min_rw = (unsigned long)ret;
67130+ if ((unsigned long)ret + size > module_addr_max_rw)
67131+ module_addr_max_rw = (unsigned long)ret + size;
67132+ mutex_unlock(&module_mutex);
67133+ }
67134+ return ret;
67135+}
67136+
67137+static void *module_alloc_update_bounds_rx(unsigned long size)
67138+{
67139+ void *ret = module_alloc_exec(size);
67140+
67141+ if (ret) {
67142+ mutex_lock(&module_mutex);
67143+ /* Update module bounds. */
67144+ if ((unsigned long)ret < module_addr_min_rx)
67145+ module_addr_min_rx = (unsigned long)ret;
67146+ if ((unsigned long)ret + size > module_addr_max_rx)
67147+ module_addr_max_rx = (unsigned long)ret + size;
67148 mutex_unlock(&module_mutex);
67149 }
67150 return ret;
67151@@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
67152 static int check_modinfo(struct module *mod, struct load_info *info)
67153 {
67154 const char *modmagic = get_modinfo(info, "vermagic");
67155+ const char *license = get_modinfo(info, "license");
67156 int err;
67157
67158+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67159+ if (!license || !license_is_gpl_compatible(license))
67160+ return -ENOEXEC;
67161+#endif
67162+
67163 /* This is allowed: modprobe --force will invalidate it. */
67164 if (!modmagic) {
67165 err = try_to_force_load(mod, "bad vermagic");
67166@@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67167 }
67168
67169 /* Set up license info based on the info section */
67170- set_license(mod, get_modinfo(info, "license"));
67171+ set_license(mod, license);
67172
67173 return 0;
67174 }
67175@@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
67176 void *ptr;
67177
67178 /* Do the allocs. */
67179- ptr = module_alloc_update_bounds(mod->core_size);
67180+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67181 /*
67182 * The pointer to this block is stored in the module structure
67183 * which is inside the block. Just mark it as not being a
67184@@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
67185 if (!ptr)
67186 return -ENOMEM;
67187
67188- memset(ptr, 0, mod->core_size);
67189- mod->module_core = ptr;
67190+ memset(ptr, 0, mod->core_size_rw);
67191+ mod->module_core_rw = ptr;
67192
67193- ptr = module_alloc_update_bounds(mod->init_size);
67194+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67195 /*
67196 * The pointer to this block is stored in the module structure
67197 * which is inside the block. This block doesn't need to be
67198 * scanned as it contains data and code that will be freed
67199 * after the module is initialized.
67200 */
67201- kmemleak_ignore(ptr);
67202- if (!ptr && mod->init_size) {
67203- module_free(mod, mod->module_core);
67204+ kmemleak_not_leak(ptr);
67205+ if (!ptr && mod->init_size_rw) {
67206+ module_free(mod, mod->module_core_rw);
67207 return -ENOMEM;
67208 }
67209- memset(ptr, 0, mod->init_size);
67210- mod->module_init = ptr;
67211+ memset(ptr, 0, mod->init_size_rw);
67212+ mod->module_init_rw = ptr;
67213+
67214+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67215+ kmemleak_not_leak(ptr);
67216+ if (!ptr) {
67217+ module_free(mod, mod->module_init_rw);
67218+ module_free(mod, mod->module_core_rw);
67219+ return -ENOMEM;
67220+ }
67221+
67222+ pax_open_kernel();
67223+ memset(ptr, 0, mod->core_size_rx);
67224+ pax_close_kernel();
67225+ mod->module_core_rx = ptr;
67226+
67227+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67228+ kmemleak_not_leak(ptr);
67229+ if (!ptr && mod->init_size_rx) {
67230+ module_free_exec(mod, mod->module_core_rx);
67231+ module_free(mod, mod->module_init_rw);
67232+ module_free(mod, mod->module_core_rw);
67233+ return -ENOMEM;
67234+ }
67235+
67236+ pax_open_kernel();
67237+ memset(ptr, 0, mod->init_size_rx);
67238+ pax_close_kernel();
67239+ mod->module_init_rx = ptr;
67240
67241 /* Transfer each section which specifies SHF_ALLOC */
67242 pr_debug("final section addresses:\n");
67243@@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
67244 if (!(shdr->sh_flags & SHF_ALLOC))
67245 continue;
67246
67247- if (shdr->sh_entsize & INIT_OFFSET_MASK)
67248- dest = mod->module_init
67249- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67250- else
67251- dest = mod->module_core + shdr->sh_entsize;
67252+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67253+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67254+ dest = mod->module_init_rw
67255+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67256+ else
67257+ dest = mod->module_init_rx
67258+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67259+ } else {
67260+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67261+ dest = mod->module_core_rw + shdr->sh_entsize;
67262+ else
67263+ dest = mod->module_core_rx + shdr->sh_entsize;
67264+ }
67265+
67266+ if (shdr->sh_type != SHT_NOBITS) {
67267+
67268+#ifdef CONFIG_PAX_KERNEXEC
67269+#ifdef CONFIG_X86_64
67270+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67271+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67272+#endif
67273+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67274+ pax_open_kernel();
67275+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67276+ pax_close_kernel();
67277+ } else
67278+#endif
67279
67280- if (shdr->sh_type != SHT_NOBITS)
67281 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67282+ }
67283 /* Update sh_addr to point to copy in image. */
67284- shdr->sh_addr = (unsigned long)dest;
67285+
67286+#ifdef CONFIG_PAX_KERNEXEC
67287+ if (shdr->sh_flags & SHF_EXECINSTR)
67288+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
67289+ else
67290+#endif
67291+
67292+ shdr->sh_addr = (unsigned long)dest;
67293 pr_debug("\t0x%lx %s\n",
67294 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67295 }
67296@@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
67297 * Do it before processing of module parameters, so the module
67298 * can provide parameter accessor functions of its own.
67299 */
67300- if (mod->module_init)
67301- flush_icache_range((unsigned long)mod->module_init,
67302- (unsigned long)mod->module_init
67303- + mod->init_size);
67304- flush_icache_range((unsigned long)mod->module_core,
67305- (unsigned long)mod->module_core + mod->core_size);
67306+ if (mod->module_init_rx)
67307+ flush_icache_range((unsigned long)mod->module_init_rx,
67308+ (unsigned long)mod->module_init_rx
67309+ + mod->init_size_rx);
67310+ flush_icache_range((unsigned long)mod->module_core_rx,
67311+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
67312
67313 set_fs(old_fs);
67314 }
67315@@ -2833,8 +2933,10 @@ out:
67316 static void module_deallocate(struct module *mod, struct load_info *info)
67317 {
67318 percpu_modfree(mod);
67319- module_free(mod, mod->module_init);
67320- module_free(mod, mod->module_core);
67321+ module_free_exec(mod, mod->module_init_rx);
67322+ module_free_exec(mod, mod->module_core_rx);
67323+ module_free(mod, mod->module_init_rw);
67324+ module_free(mod, mod->module_core_rw);
67325 }
67326
67327 int __weak module_finalize(const Elf_Ehdr *hdr,
67328@@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
67329 if (err)
67330 goto free_unload;
67331
67332+ /* Now copy in args */
67333+ mod->args = strndup_user(uargs, ~0UL >> 1);
67334+ if (IS_ERR(mod->args)) {
67335+ err = PTR_ERR(mod->args);
67336+ goto free_unload;
67337+ }
67338+
67339 /* Set up MODINFO_ATTR fields */
67340 setup_modinfo(mod, &info);
67341
67342+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67343+ {
67344+ char *p, *p2;
67345+
67346+ if (strstr(mod->args, "grsec_modharden_netdev")) {
67347+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67348+ err = -EPERM;
67349+ goto free_modinfo;
67350+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67351+ p += strlen("grsec_modharden_normal");
67352+ p2 = strstr(p, "_");
67353+ if (p2) {
67354+ *p2 = '\0';
67355+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67356+ *p2 = '_';
67357+ }
67358+ err = -EPERM;
67359+ goto free_modinfo;
67360+ }
67361+ }
67362+#endif
67363+
67364 /* Fix up syms, so that st_value is a pointer to location. */
67365 err = simplify_symbols(mod, &info);
67366 if (err < 0)
67367@@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
67368
67369 flush_module_icache(mod);
67370
67371- /* Now copy in args */
67372- mod->args = strndup_user(uargs, ~0UL >> 1);
67373- if (IS_ERR(mod->args)) {
67374- err = PTR_ERR(mod->args);
67375- goto free_arch_cleanup;
67376- }
67377-
67378 /* Mark state as coming so strong_try_module_get() ignores us. */
67379 mod->state = MODULE_STATE_COMING;
67380
67381@@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
67382 unlock:
67383 mutex_unlock(&module_mutex);
67384 synchronize_sched();
67385- kfree(mod->args);
67386- free_arch_cleanup:
67387 module_arch_cleanup(mod);
67388 free_modinfo:
67389 free_modinfo(mod);
67390+ kfree(mod->args);
67391 free_unload:
67392 module_unload_free(mod);
67393 free_module:
67394@@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67395 MODULE_STATE_COMING, mod);
67396
67397 /* Set RO and NX regions for core */
67398- set_section_ro_nx(mod->module_core,
67399- mod->core_text_size,
67400- mod->core_ro_size,
67401- mod->core_size);
67402+ set_section_ro_nx(mod->module_core_rx,
67403+ mod->core_size_rx,
67404+ mod->core_size_rx,
67405+ mod->core_size_rx);
67406
67407 /* Set RO and NX regions for init */
67408- set_section_ro_nx(mod->module_init,
67409- mod->init_text_size,
67410- mod->init_ro_size,
67411- mod->init_size);
67412+ set_section_ro_nx(mod->module_init_rx,
67413+ mod->init_size_rx,
67414+ mod->init_size_rx,
67415+ mod->init_size_rx);
67416
67417 do_mod_ctors(mod);
67418 /* Start the module */
67419@@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67420 mod->strtab = mod->core_strtab;
67421 #endif
67422 unset_module_init_ro_nx(mod);
67423- module_free(mod, mod->module_init);
67424- mod->module_init = NULL;
67425- mod->init_size = 0;
67426- mod->init_ro_size = 0;
67427- mod->init_text_size = 0;
67428+ module_free(mod, mod->module_init_rw);
67429+ module_free_exec(mod, mod->module_init_rx);
67430+ mod->module_init_rw = NULL;
67431+ mod->module_init_rx = NULL;
67432+ mod->init_size_rw = 0;
67433+ mod->init_size_rx = 0;
67434 mutex_unlock(&module_mutex);
67435
67436 return 0;
67437@@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
67438 unsigned long nextval;
67439
67440 /* At worse, next value is at end of module */
67441- if (within_module_init(addr, mod))
67442- nextval = (unsigned long)mod->module_init+mod->init_text_size;
67443+ if (within_module_init_rx(addr, mod))
67444+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67445+ else if (within_module_init_rw(addr, mod))
67446+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67447+ else if (within_module_core_rx(addr, mod))
67448+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67449+ else if (within_module_core_rw(addr, mod))
67450+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67451 else
67452- nextval = (unsigned long)mod->module_core+mod->core_text_size;
67453+ return NULL;
67454
67455 /* Scan for closest preceding symbol, and next symbol. (ELF
67456 starts real symbols at 1). */
67457@@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
67458 char buf[8];
67459
67460 seq_printf(m, "%s %u",
67461- mod->name, mod->init_size + mod->core_size);
67462+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67463 print_unload_info(m, mod);
67464
67465 /* Informative for users. */
67466@@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
67467 mod->state == MODULE_STATE_COMING ? "Loading":
67468 "Live");
67469 /* Used by oprofile and other similar tools. */
67470- seq_printf(m, " 0x%pK", mod->module_core);
67471+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67472
67473 /* Taints info */
67474 if (mod->taints)
67475@@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
67476
67477 static int __init proc_modules_init(void)
67478 {
67479+#ifndef CONFIG_GRKERNSEC_HIDESYM
67480+#ifdef CONFIG_GRKERNSEC_PROC_USER
67481+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67482+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67483+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67484+#else
67485 proc_create("modules", 0, NULL, &proc_modules_operations);
67486+#endif
67487+#else
67488+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67489+#endif
67490 return 0;
67491 }
67492 module_init(proc_modules_init);
67493@@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
67494 {
67495 struct module *mod;
67496
67497- if (addr < module_addr_min || addr > module_addr_max)
67498+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67499+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
67500 return NULL;
67501
67502 list_for_each_entry_rcu(mod, &modules, list)
67503- if (within_module_core(addr, mod)
67504- || within_module_init(addr, mod))
67505+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
67506 return mod;
67507 return NULL;
67508 }
67509@@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
67510 */
67511 struct module *__module_text_address(unsigned long addr)
67512 {
67513- struct module *mod = __module_address(addr);
67514+ struct module *mod;
67515+
67516+#ifdef CONFIG_X86_32
67517+ addr = ktla_ktva(addr);
67518+#endif
67519+
67520+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67521+ return NULL;
67522+
67523+ mod = __module_address(addr);
67524+
67525 if (mod) {
67526 /* Make sure it's within the text section. */
67527- if (!within(addr, mod->module_init, mod->init_text_size)
67528- && !within(addr, mod->module_core, mod->core_text_size))
67529+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67530 mod = NULL;
67531 }
67532 return mod;
67533diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67534index 7e3443f..b2a1e6b 100644
67535--- a/kernel/mutex-debug.c
67536+++ b/kernel/mutex-debug.c
67537@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67538 }
67539
67540 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67541- struct thread_info *ti)
67542+ struct task_struct *task)
67543 {
67544 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67545
67546 /* Mark the current thread as blocked on the lock: */
67547- ti->task->blocked_on = waiter;
67548+ task->blocked_on = waiter;
67549 }
67550
67551 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67552- struct thread_info *ti)
67553+ struct task_struct *task)
67554 {
67555 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67556- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67557- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67558- ti->task->blocked_on = NULL;
67559+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
67560+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67561+ task->blocked_on = NULL;
67562
67563 list_del_init(&waiter->list);
67564 waiter->task = NULL;
67565diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67566index 0799fd3..d06ae3b 100644
67567--- a/kernel/mutex-debug.h
67568+++ b/kernel/mutex-debug.h
67569@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67570 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67571 extern void debug_mutex_add_waiter(struct mutex *lock,
67572 struct mutex_waiter *waiter,
67573- struct thread_info *ti);
67574+ struct task_struct *task);
67575 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67576- struct thread_info *ti);
67577+ struct task_struct *task);
67578 extern void debug_mutex_unlock(struct mutex *lock);
67579 extern void debug_mutex_init(struct mutex *lock, const char *name,
67580 struct lock_class_key *key);
67581diff --git a/kernel/mutex.c b/kernel/mutex.c
67582index a307cc9..27fd2e9 100644
67583--- a/kernel/mutex.c
67584+++ b/kernel/mutex.c
67585@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67586 spin_lock_mutex(&lock->wait_lock, flags);
67587
67588 debug_mutex_lock_common(lock, &waiter);
67589- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67590+ debug_mutex_add_waiter(lock, &waiter, task);
67591
67592 /* add waiting tasks to the end of the waitqueue (FIFO): */
67593 list_add_tail(&waiter.list, &lock->wait_list);
67594@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67595 * TASK_UNINTERRUPTIBLE case.)
67596 */
67597 if (unlikely(signal_pending_state(state, task))) {
67598- mutex_remove_waiter(lock, &waiter,
67599- task_thread_info(task));
67600+ mutex_remove_waiter(lock, &waiter, task);
67601 mutex_release(&lock->dep_map, 1, ip);
67602 spin_unlock_mutex(&lock->wait_lock, flags);
67603
67604@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67605 done:
67606 lock_acquired(&lock->dep_map, ip);
67607 /* got the lock - rejoice! */
67608- mutex_remove_waiter(lock, &waiter, current_thread_info());
67609+ mutex_remove_waiter(lock, &waiter, task);
67610 mutex_set_owner(lock);
67611
67612 /* set it to 0 if there are no waiters left: */
67613diff --git a/kernel/panic.c b/kernel/panic.c
67614index 9ed023b..e49543e 100644
67615--- a/kernel/panic.c
67616+++ b/kernel/panic.c
67617@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67618 const char *board;
67619
67620 printk(KERN_WARNING "------------[ cut here ]------------\n");
67621- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67622+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67623 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67624 if (board)
67625 printk(KERN_WARNING "Hardware name: %s\n", board);
67626@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67627 */
67628 void __stack_chk_fail(void)
67629 {
67630- panic("stack-protector: Kernel stack is corrupted in: %p\n",
67631+ dump_stack();
67632+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67633 __builtin_return_address(0));
67634 }
67635 EXPORT_SYMBOL(__stack_chk_fail);
67636diff --git a/kernel/pid.c b/kernel/pid.c
67637index 9f08dfa..6765c40 100644
67638--- a/kernel/pid.c
67639+++ b/kernel/pid.c
67640@@ -33,6 +33,7 @@
67641 #include <linux/rculist.h>
67642 #include <linux/bootmem.h>
67643 #include <linux/hash.h>
67644+#include <linux/security.h>
67645 #include <linux/pid_namespace.h>
67646 #include <linux/init_task.h>
67647 #include <linux/syscalls.h>
67648@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67649
67650 int pid_max = PID_MAX_DEFAULT;
67651
67652-#define RESERVED_PIDS 300
67653+#define RESERVED_PIDS 500
67654
67655 int pid_max_min = RESERVED_PIDS + 1;
67656 int pid_max_max = PID_MAX_LIMIT;
67657@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67658 */
67659 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67660 {
67661+ struct task_struct *task;
67662+
67663 rcu_lockdep_assert(rcu_read_lock_held(),
67664 "find_task_by_pid_ns() needs rcu_read_lock()"
67665 " protection");
67666- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67667+
67668+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67669+
67670+ if (gr_pid_is_chrooted(task))
67671+ return NULL;
67672+
67673+ return task;
67674 }
67675
67676 struct task_struct *find_task_by_vpid(pid_t vnr)
67677@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67678 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67679 }
67680
67681+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67682+{
67683+ rcu_lockdep_assert(rcu_read_lock_held(),
67684+ "find_task_by_pid_ns() needs rcu_read_lock()"
67685+ " protection");
67686+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67687+}
67688+
67689 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67690 {
67691 struct pid *pid;
67692diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67693index 125cb67..a4d1c30 100644
67694--- a/kernel/posix-cpu-timers.c
67695+++ b/kernel/posix-cpu-timers.c
67696@@ -6,6 +6,7 @@
67697 #include <linux/posix-timers.h>
67698 #include <linux/errno.h>
67699 #include <linux/math64.h>
67700+#include <linux/security.h>
67701 #include <asm/uaccess.h>
67702 #include <linux/kernel_stat.h>
67703 #include <trace/events/timer.h>
67704@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67705
67706 static __init int init_posix_cpu_timers(void)
67707 {
67708- struct k_clock process = {
67709+ static struct k_clock process = {
67710 .clock_getres = process_cpu_clock_getres,
67711 .clock_get = process_cpu_clock_get,
67712 .timer_create = process_cpu_timer_create,
67713 .nsleep = process_cpu_nsleep,
67714 .nsleep_restart = process_cpu_nsleep_restart,
67715 };
67716- struct k_clock thread = {
67717+ static struct k_clock thread = {
67718 .clock_getres = thread_cpu_clock_getres,
67719 .clock_get = thread_cpu_clock_get,
67720 .timer_create = thread_cpu_timer_create,
67721diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67722index 69185ae..cc2847a 100644
67723--- a/kernel/posix-timers.c
67724+++ b/kernel/posix-timers.c
67725@@ -43,6 +43,7 @@
67726 #include <linux/idr.h>
67727 #include <linux/posix-clock.h>
67728 #include <linux/posix-timers.h>
67729+#include <linux/grsecurity.h>
67730 #include <linux/syscalls.h>
67731 #include <linux/wait.h>
67732 #include <linux/workqueue.h>
67733@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67734 * which we beg off on and pass to do_sys_settimeofday().
67735 */
67736
67737-static struct k_clock posix_clocks[MAX_CLOCKS];
67738+static struct k_clock *posix_clocks[MAX_CLOCKS];
67739
67740 /*
67741 * These ones are defined below.
67742@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67743 */
67744 static __init int init_posix_timers(void)
67745 {
67746- struct k_clock clock_realtime = {
67747+ static struct k_clock clock_realtime = {
67748 .clock_getres = hrtimer_get_res,
67749 .clock_get = posix_clock_realtime_get,
67750 .clock_set = posix_clock_realtime_set,
67751@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67752 .timer_get = common_timer_get,
67753 .timer_del = common_timer_del,
67754 };
67755- struct k_clock clock_monotonic = {
67756+ static struct k_clock clock_monotonic = {
67757 .clock_getres = hrtimer_get_res,
67758 .clock_get = posix_ktime_get_ts,
67759 .nsleep = common_nsleep,
67760@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67761 .timer_get = common_timer_get,
67762 .timer_del = common_timer_del,
67763 };
67764- struct k_clock clock_monotonic_raw = {
67765+ static struct k_clock clock_monotonic_raw = {
67766 .clock_getres = hrtimer_get_res,
67767 .clock_get = posix_get_monotonic_raw,
67768 };
67769- struct k_clock clock_realtime_coarse = {
67770+ static struct k_clock clock_realtime_coarse = {
67771 .clock_getres = posix_get_coarse_res,
67772 .clock_get = posix_get_realtime_coarse,
67773 };
67774- struct k_clock clock_monotonic_coarse = {
67775+ static struct k_clock clock_monotonic_coarse = {
67776 .clock_getres = posix_get_coarse_res,
67777 .clock_get = posix_get_monotonic_coarse,
67778 };
67779- struct k_clock clock_boottime = {
67780+ static struct k_clock clock_boottime = {
67781 .clock_getres = hrtimer_get_res,
67782 .clock_get = posix_get_boottime,
67783 .nsleep = common_nsleep,
67784@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67785 return;
67786 }
67787
67788- posix_clocks[clock_id] = *new_clock;
67789+ posix_clocks[clock_id] = new_clock;
67790 }
67791 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67792
67793@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67794 return (id & CLOCKFD_MASK) == CLOCKFD ?
67795 &clock_posix_dynamic : &clock_posix_cpu;
67796
67797- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67798+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67799 return NULL;
67800- return &posix_clocks[id];
67801+ return posix_clocks[id];
67802 }
67803
67804 static int common_timer_create(struct k_itimer *new_timer)
67805@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67806 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67807 return -EFAULT;
67808
67809+ /* only the CLOCK_REALTIME clock can be set, all other clocks
67810+ have their clock_set fptr set to a nosettime dummy function
67811+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67812+ call common_clock_set, which calls do_sys_settimeofday, which
67813+ we hook
67814+ */
67815+
67816 return kc->clock_set(which_clock, &new_tp);
67817 }
67818
67819diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67820index d523593..68197a4 100644
67821--- a/kernel/power/poweroff.c
67822+++ b/kernel/power/poweroff.c
67823@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67824 .enable_mask = SYSRQ_ENABLE_BOOT,
67825 };
67826
67827-static int pm_sysrq_init(void)
67828+static int __init pm_sysrq_init(void)
67829 {
67830 register_sysrq_key('o', &sysrq_poweroff_op);
67831 return 0;
67832diff --git a/kernel/power/process.c b/kernel/power/process.c
67833index 19db29f..33b52b6 100644
67834--- a/kernel/power/process.c
67835+++ b/kernel/power/process.c
67836@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67837 u64 elapsed_csecs64;
67838 unsigned int elapsed_csecs;
67839 bool wakeup = false;
67840+ bool timedout = false;
67841
67842 do_gettimeofday(&start);
67843
67844@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67845
67846 while (true) {
67847 todo = 0;
67848+ if (time_after(jiffies, end_time))
67849+ timedout = true;
67850 read_lock(&tasklist_lock);
67851 do_each_thread(g, p) {
67852 if (p == current || !freeze_task(p))
67853@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67854 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67855 * transition can't race with task state testing here.
67856 */
67857- if (!task_is_stopped_or_traced(p) &&
67858- !freezer_should_skip(p))
67859+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67860 todo++;
67861+ if (timedout) {
67862+ printk(KERN_ERR "Task refusing to freeze:\n");
67863+ sched_show_task(p);
67864+ }
67865+ }
67866 } while_each_thread(g, p);
67867 read_unlock(&tasklist_lock);
67868
67869@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67870 todo += wq_busy;
67871 }
67872
67873- if (!todo || time_after(jiffies, end_time))
67874+ if (!todo || timedout)
67875 break;
67876
67877 if (pm_wakeup_pending()) {
67878diff --git a/kernel/printk.c b/kernel/printk.c
67879index b663c2c..1d6ba7a 100644
67880--- a/kernel/printk.c
67881+++ b/kernel/printk.c
67882@@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67883 if (from_file && type != SYSLOG_ACTION_OPEN)
67884 return 0;
67885
67886+#ifdef CONFIG_GRKERNSEC_DMESG
67887+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67888+ return -EPERM;
67889+#endif
67890+
67891 if (syslog_action_restricted(type)) {
67892 if (capable(CAP_SYSLOG))
67893 return 0;
67894diff --git a/kernel/profile.c b/kernel/profile.c
67895index 76b8e77..a2930e8 100644
67896--- a/kernel/profile.c
67897+++ b/kernel/profile.c
67898@@ -39,7 +39,7 @@ struct profile_hit {
67899 /* Oprofile timer tick hook */
67900 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67901
67902-static atomic_t *prof_buffer;
67903+static atomic_unchecked_t *prof_buffer;
67904 static unsigned long prof_len, prof_shift;
67905
67906 int prof_on __read_mostly;
67907@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67908 hits[i].pc = 0;
67909 continue;
67910 }
67911- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67912+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67913 hits[i].hits = hits[i].pc = 0;
67914 }
67915 }
67916@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67917 * Add the current hit(s) and flush the write-queue out
67918 * to the global buffer:
67919 */
67920- atomic_add(nr_hits, &prof_buffer[pc]);
67921+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67922 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67923- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67924+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67925 hits[i].pc = hits[i].hits = 0;
67926 }
67927 out:
67928@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67929 {
67930 unsigned long pc;
67931 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67932- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67933+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67934 }
67935 #endif /* !CONFIG_SMP */
67936
67937@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67938 return -EFAULT;
67939 buf++; p++; count--; read++;
67940 }
67941- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67942+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67943 if (copy_to_user(buf, (void *)pnt, count))
67944 return -EFAULT;
67945 read += count;
67946@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67947 }
67948 #endif
67949 profile_discard_flip_buffers();
67950- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67951+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67952 return count;
67953 }
67954
67955diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67956index ee8d49b..bd3d790 100644
67957--- a/kernel/ptrace.c
67958+++ b/kernel/ptrace.c
67959@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67960
67961 if (seize)
67962 flags |= PT_SEIZED;
67963- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67964+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67965 flags |= PT_PTRACE_CAP;
67966 task->ptrace = flags;
67967
67968@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67969 break;
67970 return -EIO;
67971 }
67972- if (copy_to_user(dst, buf, retval))
67973+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67974 return -EFAULT;
67975 copied += retval;
67976 src += retval;
67977@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67978 bool seized = child->ptrace & PT_SEIZED;
67979 int ret = -EIO;
67980 siginfo_t siginfo, *si;
67981- void __user *datavp = (void __user *) data;
67982+ void __user *datavp = (__force void __user *) data;
67983 unsigned long __user *datalp = datavp;
67984 unsigned long flags;
67985
67986@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67987 goto out;
67988 }
67989
67990+ if (gr_handle_ptrace(child, request)) {
67991+ ret = -EPERM;
67992+ goto out_put_task_struct;
67993+ }
67994+
67995 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67996 ret = ptrace_attach(child, request, addr, data);
67997 /*
67998 * Some architectures need to do book-keeping after
67999 * a ptrace attach.
68000 */
68001- if (!ret)
68002+ if (!ret) {
68003 arch_ptrace_attach(child);
68004+ gr_audit_ptrace(child);
68005+ }
68006 goto out_put_task_struct;
68007 }
68008
68009@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68010 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68011 if (copied != sizeof(tmp))
68012 return -EIO;
68013- return put_user(tmp, (unsigned long __user *)data);
68014+ return put_user(tmp, (__force unsigned long __user *)data);
68015 }
68016
68017 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68018@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68019 goto out;
68020 }
68021
68022+ if (gr_handle_ptrace(child, request)) {
68023+ ret = -EPERM;
68024+ goto out_put_task_struct;
68025+ }
68026+
68027 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68028 ret = ptrace_attach(child, request, addr, data);
68029 /*
68030 * Some architectures need to do book-keeping after
68031 * a ptrace attach.
68032 */
68033- if (!ret)
68034+ if (!ret) {
68035 arch_ptrace_attach(child);
68036+ gr_audit_ptrace(child);
68037+ }
68038 goto out_put_task_struct;
68039 }
68040
68041diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68042index 37a5444..eec170a 100644
68043--- a/kernel/rcutiny.c
68044+++ b/kernel/rcutiny.c
68045@@ -46,7 +46,7 @@
68046 struct rcu_ctrlblk;
68047 static void invoke_rcu_callbacks(void);
68048 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68049-static void rcu_process_callbacks(struct softirq_action *unused);
68050+static void rcu_process_callbacks(void);
68051 static void __call_rcu(struct rcu_head *head,
68052 void (*func)(struct rcu_head *rcu),
68053 struct rcu_ctrlblk *rcp);
68054@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68055 rcu_is_callbacks_kthread()));
68056 }
68057
68058-static void rcu_process_callbacks(struct softirq_action *unused)
68059+static void rcu_process_callbacks(void)
68060 {
68061 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68062 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68063diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68064index 22ecea0..3789898 100644
68065--- a/kernel/rcutiny_plugin.h
68066+++ b/kernel/rcutiny_plugin.h
68067@@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
68068 have_rcu_kthread_work = morework;
68069 local_irq_restore(flags);
68070 if (work)
68071- rcu_process_callbacks(NULL);
68072+ rcu_process_callbacks();
68073 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68074 }
68075
68076diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68077index a89b381..efdcad8 100644
68078--- a/kernel/rcutorture.c
68079+++ b/kernel/rcutorture.c
68080@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68081 { 0 };
68082 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68083 { 0 };
68084-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68085-static atomic_t n_rcu_torture_alloc;
68086-static atomic_t n_rcu_torture_alloc_fail;
68087-static atomic_t n_rcu_torture_free;
68088-static atomic_t n_rcu_torture_mberror;
68089-static atomic_t n_rcu_torture_error;
68090+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68091+static atomic_unchecked_t n_rcu_torture_alloc;
68092+static atomic_unchecked_t n_rcu_torture_alloc_fail;
68093+static atomic_unchecked_t n_rcu_torture_free;
68094+static atomic_unchecked_t n_rcu_torture_mberror;
68095+static atomic_unchecked_t n_rcu_torture_error;
68096 static long n_rcu_torture_boost_ktrerror;
68097 static long n_rcu_torture_boost_rterror;
68098 static long n_rcu_torture_boost_failure;
68099@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
68100
68101 spin_lock_bh(&rcu_torture_lock);
68102 if (list_empty(&rcu_torture_freelist)) {
68103- atomic_inc(&n_rcu_torture_alloc_fail);
68104+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68105 spin_unlock_bh(&rcu_torture_lock);
68106 return NULL;
68107 }
68108- atomic_inc(&n_rcu_torture_alloc);
68109+ atomic_inc_unchecked(&n_rcu_torture_alloc);
68110 p = rcu_torture_freelist.next;
68111 list_del_init(p);
68112 spin_unlock_bh(&rcu_torture_lock);
68113@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
68114 static void
68115 rcu_torture_free(struct rcu_torture *p)
68116 {
68117- atomic_inc(&n_rcu_torture_free);
68118+ atomic_inc_unchecked(&n_rcu_torture_free);
68119 spin_lock_bh(&rcu_torture_lock);
68120 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68121 spin_unlock_bh(&rcu_torture_lock);
68122@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
68123 i = rp->rtort_pipe_count;
68124 if (i > RCU_TORTURE_PIPE_LEN)
68125 i = RCU_TORTURE_PIPE_LEN;
68126- atomic_inc(&rcu_torture_wcount[i]);
68127+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68128 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68129 rp->rtort_mbtest = 0;
68130 rcu_torture_free(rp);
68131@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68132 i = rp->rtort_pipe_count;
68133 if (i > RCU_TORTURE_PIPE_LEN)
68134 i = RCU_TORTURE_PIPE_LEN;
68135- atomic_inc(&rcu_torture_wcount[i]);
68136+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68137 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68138 rp->rtort_mbtest = 0;
68139 list_del(&rp->rtort_free);
68140@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
68141 i = old_rp->rtort_pipe_count;
68142 if (i > RCU_TORTURE_PIPE_LEN)
68143 i = RCU_TORTURE_PIPE_LEN;
68144- atomic_inc(&rcu_torture_wcount[i]);
68145+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68146 old_rp->rtort_pipe_count++;
68147 cur_ops->deferred_free(old_rp);
68148 }
68149@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
68150 }
68151 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
68152 if (p->rtort_mbtest == 0)
68153- atomic_inc(&n_rcu_torture_mberror);
68154+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68155 spin_lock(&rand_lock);
68156 cur_ops->read_delay(&rand);
68157 n_rcu_torture_timers++;
68158@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
68159 }
68160 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
68161 if (p->rtort_mbtest == 0)
68162- atomic_inc(&n_rcu_torture_mberror);
68163+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68164 cur_ops->read_delay(&rand);
68165 preempt_disable();
68166 pipe_count = p->rtort_pipe_count;
68167@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
68168 rcu_torture_current,
68169 rcu_torture_current_version,
68170 list_empty(&rcu_torture_freelist),
68171- atomic_read(&n_rcu_torture_alloc),
68172- atomic_read(&n_rcu_torture_alloc_fail),
68173- atomic_read(&n_rcu_torture_free),
68174- atomic_read(&n_rcu_torture_mberror),
68175+ atomic_read_unchecked(&n_rcu_torture_alloc),
68176+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68177+ atomic_read_unchecked(&n_rcu_torture_free),
68178+ atomic_read_unchecked(&n_rcu_torture_mberror),
68179 n_rcu_torture_boost_ktrerror,
68180 n_rcu_torture_boost_rterror,
68181 n_rcu_torture_boost_failure,
68182@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
68183 n_online_attempts,
68184 n_offline_successes,
68185 n_offline_attempts);
68186- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68187+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68188 n_rcu_torture_boost_ktrerror != 0 ||
68189 n_rcu_torture_boost_rterror != 0 ||
68190 n_rcu_torture_boost_failure != 0)
68191@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
68192 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68193 if (i > 1) {
68194 cnt += sprintf(&page[cnt], "!!! ");
68195- atomic_inc(&n_rcu_torture_error);
68196+ atomic_inc_unchecked(&n_rcu_torture_error);
68197 WARN_ON_ONCE(1);
68198 }
68199 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68200@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
68201 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68202 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68203 cnt += sprintf(&page[cnt], " %d",
68204- atomic_read(&rcu_torture_wcount[i]));
68205+ atomic_read_unchecked(&rcu_torture_wcount[i]));
68206 }
68207 cnt += sprintf(&page[cnt], "\n");
68208 if (cur_ops->stats)
68209@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
68210
68211 if (cur_ops->cleanup)
68212 cur_ops->cleanup();
68213- if (atomic_read(&n_rcu_torture_error))
68214+ if (atomic_read_unchecked(&n_rcu_torture_error))
68215 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68216 else if (n_online_successes != n_online_attempts ||
68217 n_offline_successes != n_offline_attempts)
68218@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
68219
68220 rcu_torture_current = NULL;
68221 rcu_torture_current_version = 0;
68222- atomic_set(&n_rcu_torture_alloc, 0);
68223- atomic_set(&n_rcu_torture_alloc_fail, 0);
68224- atomic_set(&n_rcu_torture_free, 0);
68225- atomic_set(&n_rcu_torture_mberror, 0);
68226- atomic_set(&n_rcu_torture_error, 0);
68227+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68228+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68229+ atomic_set_unchecked(&n_rcu_torture_free, 0);
68230+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68231+ atomic_set_unchecked(&n_rcu_torture_error, 0);
68232 n_rcu_torture_boost_ktrerror = 0;
68233 n_rcu_torture_boost_rterror = 0;
68234 n_rcu_torture_boost_failure = 0;
68235 n_rcu_torture_boosts = 0;
68236 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68237- atomic_set(&rcu_torture_wcount[i], 0);
68238+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68239 for_each_possible_cpu(cpu) {
68240 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68241 per_cpu(rcu_torture_count, cpu)[i] = 0;
68242diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68243index d0c5baf..109b2e7 100644
68244--- a/kernel/rcutree.c
68245+++ b/kernel/rcutree.c
68246@@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68247 rcu_prepare_for_idle(smp_processor_id());
68248 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68249 smp_mb__before_atomic_inc(); /* See above. */
68250- atomic_inc(&rdtp->dynticks);
68251+ atomic_inc_unchecked(&rdtp->dynticks);
68252 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68253- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68254+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68255
68256 /*
68257 * The idle task is not permitted to enter the idle loop while
68258@@ -448,10 +448,10 @@ void rcu_irq_exit(void)
68259 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68260 {
68261 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68262- atomic_inc(&rdtp->dynticks);
68263+ atomic_inc_unchecked(&rdtp->dynticks);
68264 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68265 smp_mb__after_atomic_inc(); /* See above. */
68266- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68267+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68268 rcu_cleanup_after_idle(smp_processor_id());
68269 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68270 if (!is_idle_task(current)) {
68271@@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
68272 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68273
68274 if (rdtp->dynticks_nmi_nesting == 0 &&
68275- (atomic_read(&rdtp->dynticks) & 0x1))
68276+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68277 return;
68278 rdtp->dynticks_nmi_nesting++;
68279 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68280- atomic_inc(&rdtp->dynticks);
68281+ atomic_inc_unchecked(&rdtp->dynticks);
68282 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68283 smp_mb__after_atomic_inc(); /* See above. */
68284- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68285+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68286 }
68287
68288 /**
68289@@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
68290 return;
68291 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68292 smp_mb__before_atomic_inc(); /* See above. */
68293- atomic_inc(&rdtp->dynticks);
68294+ atomic_inc_unchecked(&rdtp->dynticks);
68295 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68296- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68297+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68298 }
68299
68300 #ifdef CONFIG_PROVE_RCU
68301@@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
68302 int ret;
68303
68304 preempt_disable();
68305- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68306+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68307 preempt_enable();
68308 return ret;
68309 }
68310@@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68311 */
68312 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68313 {
68314- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68315+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68316 return (rdp->dynticks_snap & 0x1) == 0;
68317 }
68318
68319@@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68320 unsigned int curr;
68321 unsigned int snap;
68322
68323- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68324+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68325 snap = (unsigned int)rdp->dynticks_snap;
68326
68327 /*
68328@@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
68329 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
68330 */
68331 if (till_stall_check < 3) {
68332- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
68333+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
68334 till_stall_check = 3;
68335 } else if (till_stall_check > 300) {
68336- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
68337+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
68338 till_stall_check = 300;
68339 }
68340 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
68341@@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68342 /*
68343 * Do RCU core processing for the current CPU.
68344 */
68345-static void rcu_process_callbacks(struct softirq_action *unused)
68346+static void rcu_process_callbacks(void)
68347 {
68348 trace_rcu_utilization("Start RCU core");
68349 __rcu_process_callbacks(&rcu_sched_state,
68350@@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
68351 }
68352 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
68353
68354-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68355-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68356+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68357+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68358
68359 static int synchronize_sched_expedited_cpu_stop(void *data)
68360 {
68361@@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
68362 int firstsnap, s, snap, trycount = 0;
68363
68364 /* Note that atomic_inc_return() implies full memory barrier. */
68365- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68366+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68367 get_online_cpus();
68368 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
68369
68370@@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
68371 }
68372
68373 /* Check to see if someone else did our work for us. */
68374- s = atomic_read(&sync_sched_expedited_done);
68375+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68376 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68377 smp_mb(); /* ensure test happens before caller kfree */
68378 return;
68379@@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
68380 * grace period works for us.
68381 */
68382 get_online_cpus();
68383- snap = atomic_read(&sync_sched_expedited_started);
68384+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
68385 smp_mb(); /* ensure read is before try_stop_cpus(). */
68386 }
68387
68388@@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
68389 * than we did beat us to the punch.
68390 */
68391 do {
68392- s = atomic_read(&sync_sched_expedited_done);
68393+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68394 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68395 smp_mb(); /* ensure test happens before caller kfree */
68396 break;
68397 }
68398- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68399+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68400
68401 put_online_cpus();
68402 }
68403@@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68404 rdp->qlen = 0;
68405 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68406 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
68407- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68408+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68409 rdp->cpu = cpu;
68410 rdp->rsp = rsp;
68411 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68412@@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68413 rdp->n_force_qs_snap = rsp->n_force_qs;
68414 rdp->blimit = blimit;
68415 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
68416- atomic_set(&rdp->dynticks->dynticks,
68417- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68418+ atomic_set_unchecked(&rdp->dynticks->dynticks,
68419+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68420 rcu_prepare_for_idle_init(cpu);
68421 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68422
68423diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68424index cdd1be0..5b2efb4 100644
68425--- a/kernel/rcutree.h
68426+++ b/kernel/rcutree.h
68427@@ -87,7 +87,7 @@ struct rcu_dynticks {
68428 long long dynticks_nesting; /* Track irq/process nesting level. */
68429 /* Process level is worth LLONG_MAX/2. */
68430 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68431- atomic_t dynticks; /* Even value for idle, else odd. */
68432+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68433 };
68434
68435 /* RCU's kthread states for tracing. */
68436diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68437index c023464..7f57225 100644
68438--- a/kernel/rcutree_plugin.h
68439+++ b/kernel/rcutree_plugin.h
68440@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
68441
68442 /* Clean up and exit. */
68443 smp_mb(); /* ensure expedited GP seen before counter increment. */
68444- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68445+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68446 unlock_mb_ret:
68447 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68448 mb_ret:
68449diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68450index ed459ed..a03c3fa 100644
68451--- a/kernel/rcutree_trace.c
68452+++ b/kernel/rcutree_trace.c
68453@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68454 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68455 rdp->qs_pending);
68456 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68457- atomic_read(&rdp->dynticks->dynticks),
68458+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68459 rdp->dynticks->dynticks_nesting,
68460 rdp->dynticks->dynticks_nmi_nesting,
68461 rdp->dynticks_fqs);
68462@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68463 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68464 rdp->qs_pending);
68465 seq_printf(m, ",%d,%llx,%d,%lu",
68466- atomic_read(&rdp->dynticks->dynticks),
68467+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68468 rdp->dynticks->dynticks_nesting,
68469 rdp->dynticks->dynticks_nmi_nesting,
68470 rdp->dynticks_fqs);
68471diff --git a/kernel/resource.c b/kernel/resource.c
68472index 7e8ea66..1efd11f 100644
68473--- a/kernel/resource.c
68474+++ b/kernel/resource.c
68475@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68476
68477 static int __init ioresources_init(void)
68478 {
68479+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68480+#ifdef CONFIG_GRKERNSEC_PROC_USER
68481+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68482+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68483+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68484+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68485+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68486+#endif
68487+#else
68488 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68489 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68490+#endif
68491 return 0;
68492 }
68493 __initcall(ioresources_init);
68494diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68495index 98ec494..4241d6d 100644
68496--- a/kernel/rtmutex-tester.c
68497+++ b/kernel/rtmutex-tester.c
68498@@ -20,7 +20,7 @@
68499 #define MAX_RT_TEST_MUTEXES 8
68500
68501 static spinlock_t rttest_lock;
68502-static atomic_t rttest_event;
68503+static atomic_unchecked_t rttest_event;
68504
68505 struct test_thread_data {
68506 int opcode;
68507@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68508
68509 case RTTEST_LOCKCONT:
68510 td->mutexes[td->opdata] = 1;
68511- td->event = atomic_add_return(1, &rttest_event);
68512+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68513 return 0;
68514
68515 case RTTEST_RESET:
68516@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68517 return 0;
68518
68519 case RTTEST_RESETEVENT:
68520- atomic_set(&rttest_event, 0);
68521+ atomic_set_unchecked(&rttest_event, 0);
68522 return 0;
68523
68524 default:
68525@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68526 return ret;
68527
68528 td->mutexes[id] = 1;
68529- td->event = atomic_add_return(1, &rttest_event);
68530+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68531 rt_mutex_lock(&mutexes[id]);
68532- td->event = atomic_add_return(1, &rttest_event);
68533+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68534 td->mutexes[id] = 4;
68535 return 0;
68536
68537@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68538 return ret;
68539
68540 td->mutexes[id] = 1;
68541- td->event = atomic_add_return(1, &rttest_event);
68542+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68543 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68544- td->event = atomic_add_return(1, &rttest_event);
68545+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68546 td->mutexes[id] = ret ? 0 : 4;
68547 return ret ? -EINTR : 0;
68548
68549@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68550 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68551 return ret;
68552
68553- td->event = atomic_add_return(1, &rttest_event);
68554+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68555 rt_mutex_unlock(&mutexes[id]);
68556- td->event = atomic_add_return(1, &rttest_event);
68557+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68558 td->mutexes[id] = 0;
68559 return 0;
68560
68561@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68562 break;
68563
68564 td->mutexes[dat] = 2;
68565- td->event = atomic_add_return(1, &rttest_event);
68566+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68567 break;
68568
68569 default:
68570@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68571 return;
68572
68573 td->mutexes[dat] = 3;
68574- td->event = atomic_add_return(1, &rttest_event);
68575+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68576 break;
68577
68578 case RTTEST_LOCKNOWAIT:
68579@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68580 return;
68581
68582 td->mutexes[dat] = 1;
68583- td->event = atomic_add_return(1, &rttest_event);
68584+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68585 return;
68586
68587 default:
68588diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68589index 0984a21..939f183 100644
68590--- a/kernel/sched/auto_group.c
68591+++ b/kernel/sched/auto_group.c
68592@@ -11,7 +11,7 @@
68593
68594 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68595 static struct autogroup autogroup_default;
68596-static atomic_t autogroup_seq_nr;
68597+static atomic_unchecked_t autogroup_seq_nr;
68598
68599 void __init autogroup_init(struct task_struct *init_task)
68600 {
68601@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68602
68603 kref_init(&ag->kref);
68604 init_rwsem(&ag->lock);
68605- ag->id = atomic_inc_return(&autogroup_seq_nr);
68606+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68607 ag->tg = tg;
68608 #ifdef CONFIG_RT_GROUP_SCHED
68609 /*
68610diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68611index 817bf70..9099fb4 100644
68612--- a/kernel/sched/core.c
68613+++ b/kernel/sched/core.c
68614@@ -4038,6 +4038,8 @@ int can_nice(const struct task_struct *p, const int nice)
68615 /* convert nice value [19,-20] to rlimit style value [1,40] */
68616 int nice_rlim = 20 - nice;
68617
68618+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68619+
68620 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68621 capable(CAP_SYS_NICE));
68622 }
68623@@ -4071,7 +4073,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68624 if (nice > 19)
68625 nice = 19;
68626
68627- if (increment < 0 && !can_nice(current, nice))
68628+ if (increment < 0 && (!can_nice(current, nice) ||
68629+ gr_handle_chroot_nice()))
68630 return -EPERM;
68631
68632 retval = security_task_setnice(current, nice);
68633@@ -4228,6 +4231,7 @@ recheck:
68634 unsigned long rlim_rtprio =
68635 task_rlimit(p, RLIMIT_RTPRIO);
68636
68637+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68638 /* can't set/change the rt policy */
68639 if (policy != p->policy && !rlim_rtprio)
68640 return -EPERM;
68641diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68642index e955364..eacd2a4 100644
68643--- a/kernel/sched/fair.c
68644+++ b/kernel/sched/fair.c
68645@@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68646 * run_rebalance_domains is triggered when needed from the scheduler tick.
68647 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68648 */
68649-static void run_rebalance_domains(struct softirq_action *h)
68650+static void run_rebalance_domains(void)
68651 {
68652 int this_cpu = smp_processor_id();
68653 struct rq *this_rq = cpu_rq(this_cpu);
68654diff --git a/kernel/signal.c b/kernel/signal.c
68655index 17afcaf..4500b05 100644
68656--- a/kernel/signal.c
68657+++ b/kernel/signal.c
68658@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
68659
68660 int print_fatal_signals __read_mostly;
68661
68662-static void __user *sig_handler(struct task_struct *t, int sig)
68663+static __sighandler_t sig_handler(struct task_struct *t, int sig)
68664 {
68665 return t->sighand->action[sig - 1].sa.sa_handler;
68666 }
68667
68668-static int sig_handler_ignored(void __user *handler, int sig)
68669+static int sig_handler_ignored(__sighandler_t handler, int sig)
68670 {
68671 /* Is it explicitly or implicitly ignored? */
68672 return handler == SIG_IGN ||
68673@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68674
68675 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
68676 {
68677- void __user *handler;
68678+ __sighandler_t handler;
68679
68680 handler = sig_handler(t, sig);
68681
68682@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68683 atomic_inc(&user->sigpending);
68684 rcu_read_unlock();
68685
68686+ if (!override_rlimit)
68687+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68688+
68689 if (override_rlimit ||
68690 atomic_read(&user->sigpending) <=
68691 task_rlimit(t, RLIMIT_SIGPENDING)) {
68692@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68693
68694 int unhandled_signal(struct task_struct *tsk, int sig)
68695 {
68696- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68697+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68698 if (is_global_init(tsk))
68699 return 1;
68700 if (handler != SIG_IGN && handler != SIG_DFL)
68701@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68702 }
68703 }
68704
68705+ /* allow glibc communication via tgkill to other threads in our
68706+ thread group */
68707+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68708+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68709+ && gr_handle_signal(t, sig))
68710+ return -EPERM;
68711+
68712 return security_task_kill(t, info, sig, 0);
68713 }
68714
68715@@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68716 return send_signal(sig, info, p, 1);
68717 }
68718
68719-static int
68720+int
68721 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68722 {
68723 return send_signal(sig, info, t, 0);
68724@@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68725 unsigned long int flags;
68726 int ret, blocked, ignored;
68727 struct k_sigaction *action;
68728+ int is_unhandled = 0;
68729
68730 spin_lock_irqsave(&t->sighand->siglock, flags);
68731 action = &t->sighand->action[sig-1];
68732@@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68733 }
68734 if (action->sa.sa_handler == SIG_DFL)
68735 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68736+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68737+ is_unhandled = 1;
68738 ret = specific_send_sig_info(sig, info, t);
68739 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68740
68741+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68742+ normal operation */
68743+ if (is_unhandled) {
68744+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68745+ gr_handle_crash(t, sig);
68746+ }
68747+
68748 return ret;
68749 }
68750
68751@@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68752 ret = check_kill_permission(sig, info, p);
68753 rcu_read_unlock();
68754
68755- if (!ret && sig)
68756+ if (!ret && sig) {
68757 ret = do_send_sig_info(sig, info, p, true);
68758+ if (!ret)
68759+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68760+ }
68761
68762 return ret;
68763 }
68764@@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68765 int error = -ESRCH;
68766
68767 rcu_read_lock();
68768- p = find_task_by_vpid(pid);
68769+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68770+ /* allow glibc communication via tgkill to other threads in our
68771+ thread group */
68772+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68773+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68774+ p = find_task_by_vpid_unrestricted(pid);
68775+ else
68776+#endif
68777+ p = find_task_by_vpid(pid);
68778 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68779 error = check_kill_permission(sig, info, p);
68780 /*
68781diff --git a/kernel/smp.c b/kernel/smp.c
68782index 2f8b10e..a41bc14 100644
68783--- a/kernel/smp.c
68784+++ b/kernel/smp.c
68785@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68786 }
68787 EXPORT_SYMBOL(smp_call_function);
68788
68789-void ipi_call_lock(void)
68790+void ipi_call_lock(void) __acquires(call_function.lock)
68791 {
68792 raw_spin_lock(&call_function.lock);
68793 }
68794
68795-void ipi_call_unlock(void)
68796+void ipi_call_unlock(void) __releases(call_function.lock)
68797 {
68798 raw_spin_unlock(&call_function.lock);
68799 }
68800
68801-void ipi_call_lock_irq(void)
68802+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68803 {
68804 raw_spin_lock_irq(&call_function.lock);
68805 }
68806
68807-void ipi_call_unlock_irq(void)
68808+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68809 {
68810 raw_spin_unlock_irq(&call_function.lock);
68811 }
68812diff --git a/kernel/softirq.c b/kernel/softirq.c
68813index 671f959..91c51cb 100644
68814--- a/kernel/softirq.c
68815+++ b/kernel/softirq.c
68816@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68817
68818 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68819
68820-char *softirq_to_name[NR_SOFTIRQS] = {
68821+const char * const softirq_to_name[NR_SOFTIRQS] = {
68822 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68823 "TASKLET", "SCHED", "HRTIMER", "RCU"
68824 };
68825@@ -235,7 +235,7 @@ restart:
68826 kstat_incr_softirqs_this_cpu(vec_nr);
68827
68828 trace_softirq_entry(vec_nr);
68829- h->action(h);
68830+ h->action();
68831 trace_softirq_exit(vec_nr);
68832 if (unlikely(prev_count != preempt_count())) {
68833 printk(KERN_ERR "huh, entered softirq %u %s %p"
68834@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68835 or_softirq_pending(1UL << nr);
68836 }
68837
68838-void open_softirq(int nr, void (*action)(struct softirq_action *))
68839+void open_softirq(int nr, void (*action)(void))
68840 {
68841- softirq_vec[nr].action = action;
68842+ pax_open_kernel();
68843+ *(void **)&softirq_vec[nr].action = action;
68844+ pax_close_kernel();
68845 }
68846
68847 /*
68848@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68849
68850 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68851
68852-static void tasklet_action(struct softirq_action *a)
68853+static void tasklet_action(void)
68854 {
68855 struct tasklet_struct *list;
68856
68857@@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68858 }
68859 }
68860
68861-static void tasklet_hi_action(struct softirq_action *a)
68862+static void tasklet_hi_action(void)
68863 {
68864 struct tasklet_struct *list;
68865
68866diff --git a/kernel/sys.c b/kernel/sys.c
68867index e7006eb..8fb7c51 100644
68868--- a/kernel/sys.c
68869+++ b/kernel/sys.c
68870@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68871 error = -EACCES;
68872 goto out;
68873 }
68874+
68875+ if (gr_handle_chroot_setpriority(p, niceval)) {
68876+ error = -EACCES;
68877+ goto out;
68878+ }
68879+
68880 no_nice = security_task_setnice(p, niceval);
68881 if (no_nice) {
68882 error = no_nice;
68883@@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68884 goto error;
68885 }
68886
68887+ if (gr_check_group_change(new->gid, new->egid, -1))
68888+ goto error;
68889+
68890 if (rgid != (gid_t) -1 ||
68891 (egid != (gid_t) -1 && egid != old->gid))
68892 new->sgid = new->egid;
68893@@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68894 old = current_cred();
68895
68896 retval = -EPERM;
68897+
68898+ if (gr_check_group_change(gid, gid, gid))
68899+ goto error;
68900+
68901 if (nsown_capable(CAP_SETGID))
68902 new->gid = new->egid = new->sgid = new->fsgid = gid;
68903 else if (gid == old->gid || gid == old->sgid)
68904@@ -627,7 +640,7 @@ error:
68905 /*
68906 * change the user struct in a credentials set to match the new UID
68907 */
68908-static int set_user(struct cred *new)
68909+int set_user(struct cred *new)
68910 {
68911 struct user_struct *new_user;
68912
68913@@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68914 goto error;
68915 }
68916
68917+ if (gr_check_user_change(new->uid, new->euid, -1))
68918+ goto error;
68919+
68920 if (new->uid != old->uid) {
68921 retval = set_user(new);
68922 if (retval < 0)
68923@@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68924 old = current_cred();
68925
68926 retval = -EPERM;
68927+
68928+ if (gr_check_crash_uid(uid))
68929+ goto error;
68930+ if (gr_check_user_change(uid, uid, uid))
68931+ goto error;
68932+
68933 if (nsown_capable(CAP_SETUID)) {
68934 new->suid = new->uid = uid;
68935 if (uid != old->uid) {
68936@@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68937 goto error;
68938 }
68939
68940+ if (gr_check_user_change(ruid, euid, -1))
68941+ goto error;
68942+
68943 if (ruid != (uid_t) -1) {
68944 new->uid = ruid;
68945 if (ruid != old->uid) {
68946@@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68947 goto error;
68948 }
68949
68950+ if (gr_check_group_change(rgid, egid, -1))
68951+ goto error;
68952+
68953 if (rgid != (gid_t) -1)
68954 new->gid = rgid;
68955 if (egid != (gid_t) -1)
68956@@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68957 old = current_cred();
68958 old_fsuid = old->fsuid;
68959
68960+ if (gr_check_user_change(-1, -1, uid))
68961+ goto error;
68962+
68963 if (uid == old->uid || uid == old->euid ||
68964 uid == old->suid || uid == old->fsuid ||
68965 nsown_capable(CAP_SETUID)) {
68966@@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68967 }
68968 }
68969
68970+error:
68971 abort_creds(new);
68972 return old_fsuid;
68973
68974@@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68975 if (gid == old->gid || gid == old->egid ||
68976 gid == old->sgid || gid == old->fsgid ||
68977 nsown_capable(CAP_SETGID)) {
68978+ if (gr_check_group_change(-1, -1, gid))
68979+ goto error;
68980+
68981 if (gid != old_fsgid) {
68982 new->fsgid = gid;
68983 goto change_okay;
68984 }
68985 }
68986
68987+error:
68988 abort_creds(new);
68989 return old_fsgid;
68990
68991@@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68992 }
68993 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68994 snprintf(buf, len, "2.6.%u%s", v, rest);
68995- ret = copy_to_user(release, buf, len);
68996+ if (len > sizeof(buf))
68997+ ret = -EFAULT;
68998+ else
68999+ ret = copy_to_user(release, buf, len);
69000 }
69001 return ret;
69002 }
69003@@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69004 return -EFAULT;
69005
69006 down_read(&uts_sem);
69007- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69008+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69009 __OLD_UTS_LEN);
69010 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69011- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69012+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69013 __OLD_UTS_LEN);
69014 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69015- error |= __copy_to_user(&name->release, &utsname()->release,
69016+ error |= __copy_to_user(name->release, &utsname()->release,
69017 __OLD_UTS_LEN);
69018 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69019- error |= __copy_to_user(&name->version, &utsname()->version,
69020+ error |= __copy_to_user(name->version, &utsname()->version,
69021 __OLD_UTS_LEN);
69022 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69023- error |= __copy_to_user(&name->machine, &utsname()->machine,
69024+ error |= __copy_to_user(name->machine, &utsname()->machine,
69025 __OLD_UTS_LEN);
69026 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69027 up_read(&uts_sem);
69028@@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69029 error = get_dumpable(me->mm);
69030 break;
69031 case PR_SET_DUMPABLE:
69032- if (arg2 < 0 || arg2 > 1) {
69033+ if (arg2 > 1) {
69034 error = -EINVAL;
69035 break;
69036 }
69037diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69038index 4ab1187..0b75ced 100644
69039--- a/kernel/sysctl.c
69040+++ b/kernel/sysctl.c
69041@@ -91,7 +91,6 @@
69042
69043
69044 #if defined(CONFIG_SYSCTL)
69045-
69046 /* External variables not in a header file. */
69047 extern int sysctl_overcommit_memory;
69048 extern int sysctl_overcommit_ratio;
69049@@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
69050 void __user *buffer, size_t *lenp, loff_t *ppos);
69051 #endif
69052
69053-#ifdef CONFIG_PRINTK
69054 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69055 void __user *buffer, size_t *lenp, loff_t *ppos);
69056-#endif
69057
69058 #ifdef CONFIG_MAGIC_SYSRQ
69059 /* Note: sysrq code uses it's own private copy */
69060@@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69061
69062 #endif
69063
69064+extern struct ctl_table grsecurity_table[];
69065+
69066 static struct ctl_table kern_table[];
69067 static struct ctl_table vm_table[];
69068 static struct ctl_table fs_table[];
69069@@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
69070 int sysctl_legacy_va_layout;
69071 #endif
69072
69073+#ifdef CONFIG_PAX_SOFTMODE
69074+static ctl_table pax_table[] = {
69075+ {
69076+ .procname = "softmode",
69077+ .data = &pax_softmode,
69078+ .maxlen = sizeof(unsigned int),
69079+ .mode = 0600,
69080+ .proc_handler = &proc_dointvec,
69081+ },
69082+
69083+ { }
69084+};
69085+#endif
69086+
69087 /* The default sysctl tables: */
69088
69089 static struct ctl_table sysctl_base_table[] = {
69090@@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
69091 #endif
69092
69093 static struct ctl_table kern_table[] = {
69094+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69095+ {
69096+ .procname = "grsecurity",
69097+ .mode = 0500,
69098+ .child = grsecurity_table,
69099+ },
69100+#endif
69101+
69102+#ifdef CONFIG_PAX_SOFTMODE
69103+ {
69104+ .procname = "pax",
69105+ .mode = 0500,
69106+ .child = pax_table,
69107+ },
69108+#endif
69109+
69110 {
69111 .procname = "sched_child_runs_first",
69112 .data = &sysctl_sched_child_runs_first,
69113@@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
69114 .data = &modprobe_path,
69115 .maxlen = KMOD_PATH_LEN,
69116 .mode = 0644,
69117- .proc_handler = proc_dostring,
69118+ .proc_handler = proc_dostring_modpriv,
69119 },
69120 {
69121 .procname = "modules_disabled",
69122@@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
69123 .extra1 = &zero,
69124 .extra2 = &one,
69125 },
69126+#endif
69127 {
69128 .procname = "kptr_restrict",
69129 .data = &kptr_restrict,
69130 .maxlen = sizeof(int),
69131 .mode = 0644,
69132 .proc_handler = proc_dointvec_minmax_sysadmin,
69133+#ifdef CONFIG_GRKERNSEC_HIDESYM
69134+ .extra1 = &two,
69135+#else
69136 .extra1 = &zero,
69137+#endif
69138 .extra2 = &two,
69139 },
69140-#endif
69141 {
69142 .procname = "ngroups_max",
69143 .data = &ngroups_max,
69144@@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
69145 .proc_handler = proc_dointvec_minmax,
69146 .extra1 = &zero,
69147 },
69148+ {
69149+ .procname = "heap_stack_gap",
69150+ .data = &sysctl_heap_stack_gap,
69151+ .maxlen = sizeof(sysctl_heap_stack_gap),
69152+ .mode = 0644,
69153+ .proc_handler = proc_doulongvec_minmax,
69154+ },
69155 #else
69156 {
69157 .procname = "nr_trim_pages",
69158@@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
69159 buffer, lenp, ppos);
69160 }
69161
69162+int proc_dostring_modpriv(struct ctl_table *table, int write,
69163+ void __user *buffer, size_t *lenp, loff_t *ppos)
69164+{
69165+ if (write && !capable(CAP_SYS_MODULE))
69166+ return -EPERM;
69167+
69168+ return _proc_do_string(table->data, table->maxlen, write,
69169+ buffer, lenp, ppos);
69170+}
69171+
69172 static size_t proc_skip_spaces(char **buf)
69173 {
69174 size_t ret;
69175@@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69176 len = strlen(tmp);
69177 if (len > *size)
69178 len = *size;
69179+ if (len > sizeof(tmp))
69180+ len = sizeof(tmp);
69181 if (copy_to_user(*buf, tmp, len))
69182 return -EFAULT;
69183 *size -= len;
69184@@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
69185 return err;
69186 }
69187
69188-#ifdef CONFIG_PRINTK
69189 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69190 void __user *buffer, size_t *lenp, loff_t *ppos)
69191 {
69192@@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69193
69194 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
69195 }
69196-#endif
69197
69198 struct do_proc_dointvec_minmax_conv_param {
69199 int *min;
69200@@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69201 *i = val;
69202 } else {
69203 val = convdiv * (*i) / convmul;
69204- if (!first)
69205+ if (!first) {
69206 err = proc_put_char(&buffer, &left, '\t');
69207+ if (err)
69208+ break;
69209+ }
69210 err = proc_put_long(&buffer, &left, val, false);
69211 if (err)
69212 break;
69213@@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
69214 return -ENOSYS;
69215 }
69216
69217+int proc_dostring_modpriv(struct ctl_table *table, int write,
69218+ void __user *buffer, size_t *lenp, loff_t *ppos)
69219+{
69220+ return -ENOSYS;
69221+}
69222+
69223 int proc_dointvec(struct ctl_table *table, int write,
69224 void __user *buffer, size_t *lenp, loff_t *ppos)
69225 {
69226@@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69227 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69228 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69229 EXPORT_SYMBOL(proc_dostring);
69230+EXPORT_SYMBOL(proc_dostring_modpriv);
69231 EXPORT_SYMBOL(proc_doulongvec_minmax);
69232 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69233diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69234index a650694..aaeeb20 100644
69235--- a/kernel/sysctl_binary.c
69236+++ b/kernel/sysctl_binary.c
69237@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69238 int i;
69239
69240 set_fs(KERNEL_DS);
69241- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69242+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69243 set_fs(old_fs);
69244 if (result < 0)
69245 goto out_kfree;
69246@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69247 }
69248
69249 set_fs(KERNEL_DS);
69250- result = vfs_write(file, buffer, str - buffer, &pos);
69251+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69252 set_fs(old_fs);
69253 if (result < 0)
69254 goto out_kfree;
69255@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69256 int i;
69257
69258 set_fs(KERNEL_DS);
69259- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69260+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69261 set_fs(old_fs);
69262 if (result < 0)
69263 goto out_kfree;
69264@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69265 }
69266
69267 set_fs(KERNEL_DS);
69268- result = vfs_write(file, buffer, str - buffer, &pos);
69269+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69270 set_fs(old_fs);
69271 if (result < 0)
69272 goto out_kfree;
69273@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69274 int i;
69275
69276 set_fs(KERNEL_DS);
69277- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69278+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69279 set_fs(old_fs);
69280 if (result < 0)
69281 goto out;
69282@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69283 __le16 dnaddr;
69284
69285 set_fs(KERNEL_DS);
69286- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69287+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69288 set_fs(old_fs);
69289 if (result < 0)
69290 goto out;
69291@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69292 le16_to_cpu(dnaddr) & 0x3ff);
69293
69294 set_fs(KERNEL_DS);
69295- result = vfs_write(file, buf, len, &pos);
69296+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69297 set_fs(old_fs);
69298 if (result < 0)
69299 goto out;
69300diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69301index e660464..c8b9e67 100644
69302--- a/kernel/taskstats.c
69303+++ b/kernel/taskstats.c
69304@@ -27,9 +27,12 @@
69305 #include <linux/cgroup.h>
69306 #include <linux/fs.h>
69307 #include <linux/file.h>
69308+#include <linux/grsecurity.h>
69309 #include <net/genetlink.h>
69310 #include <linux/atomic.h>
69311
69312+extern int gr_is_taskstats_denied(int pid);
69313+
69314 /*
69315 * Maximum length of a cpumask that can be specified in
69316 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69317@@ -556,6 +559,9 @@ err:
69318
69319 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69320 {
69321+ if (gr_is_taskstats_denied(current->pid))
69322+ return -EACCES;
69323+
69324 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69325 return cmd_attr_register_cpumask(info);
69326 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69327diff --git a/kernel/time.c b/kernel/time.c
69328index ba744cf..267b7c5 100644
69329--- a/kernel/time.c
69330+++ b/kernel/time.c
69331@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69332 return error;
69333
69334 if (tz) {
69335+ /* we log in do_settimeofday called below, so don't log twice
69336+ */
69337+ if (!tv)
69338+ gr_log_timechange();
69339+
69340 sys_tz = *tz;
69341 update_vsyscall_tz();
69342 if (firsttime) {
69343diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69344index 8a538c5..def79d4 100644
69345--- a/kernel/time/alarmtimer.c
69346+++ b/kernel/time/alarmtimer.c
69347@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
69348 struct platform_device *pdev;
69349 int error = 0;
69350 int i;
69351- struct k_clock alarm_clock = {
69352+ static struct k_clock alarm_clock = {
69353 .clock_getres = alarm_clock_getres,
69354 .clock_get = alarm_clock_get,
69355 .timer_create = alarm_timer_create,
69356diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69357index f113755..ec24223 100644
69358--- a/kernel/time/tick-broadcast.c
69359+++ b/kernel/time/tick-broadcast.c
69360@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69361 * then clear the broadcast bit.
69362 */
69363 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69364- int cpu = smp_processor_id();
69365+ cpu = smp_processor_id();
69366
69367 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69368 tick_broadcast_clear_oneshot(cpu);
69369diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69370index 7c50de8..e29a94d 100644
69371--- a/kernel/time/timekeeping.c
69372+++ b/kernel/time/timekeeping.c
69373@@ -14,6 +14,7 @@
69374 #include <linux/init.h>
69375 #include <linux/mm.h>
69376 #include <linux/sched.h>
69377+#include <linux/grsecurity.h>
69378 #include <linux/syscore_ops.h>
69379 #include <linux/clocksource.h>
69380 #include <linux/jiffies.h>
69381@@ -388,6 +389,8 @@ int do_settimeofday(const struct timespec *tv)
69382 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69383 return -EINVAL;
69384
69385+ gr_log_timechange();
69386+
69387 write_seqlock_irqsave(&timekeeper.lock, flags);
69388
69389 timekeeping_forward_now();
69390diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69391index 3258455..f35227d 100644
69392--- a/kernel/time/timer_list.c
69393+++ b/kernel/time/timer_list.c
69394@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69395
69396 static void print_name_offset(struct seq_file *m, void *sym)
69397 {
69398+#ifdef CONFIG_GRKERNSEC_HIDESYM
69399+ SEQ_printf(m, "<%p>", NULL);
69400+#else
69401 char symname[KSYM_NAME_LEN];
69402
69403 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69404 SEQ_printf(m, "<%pK>", sym);
69405 else
69406 SEQ_printf(m, "%s", symname);
69407+#endif
69408 }
69409
69410 static void
69411@@ -112,7 +116,11 @@ next_one:
69412 static void
69413 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69414 {
69415+#ifdef CONFIG_GRKERNSEC_HIDESYM
69416+ SEQ_printf(m, " .base: %p\n", NULL);
69417+#else
69418 SEQ_printf(m, " .base: %pK\n", base);
69419+#endif
69420 SEQ_printf(m, " .index: %d\n",
69421 base->index);
69422 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69423@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69424 {
69425 struct proc_dir_entry *pe;
69426
69427+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69428+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69429+#else
69430 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69431+#endif
69432 if (!pe)
69433 return -ENOMEM;
69434 return 0;
69435diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69436index 0b537f2..9e71eca 100644
69437--- a/kernel/time/timer_stats.c
69438+++ b/kernel/time/timer_stats.c
69439@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69440 static unsigned long nr_entries;
69441 static struct entry entries[MAX_ENTRIES];
69442
69443-static atomic_t overflow_count;
69444+static atomic_unchecked_t overflow_count;
69445
69446 /*
69447 * The entries are in a hash-table, for fast lookup:
69448@@ -140,7 +140,7 @@ static void reset_entries(void)
69449 nr_entries = 0;
69450 memset(entries, 0, sizeof(entries));
69451 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69452- atomic_set(&overflow_count, 0);
69453+ atomic_set_unchecked(&overflow_count, 0);
69454 }
69455
69456 static struct entry *alloc_entry(void)
69457@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69458 if (likely(entry))
69459 entry->count++;
69460 else
69461- atomic_inc(&overflow_count);
69462+ atomic_inc_unchecked(&overflow_count);
69463
69464 out_unlock:
69465 raw_spin_unlock_irqrestore(lock, flags);
69466@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69467
69468 static void print_name_offset(struct seq_file *m, unsigned long addr)
69469 {
69470+#ifdef CONFIG_GRKERNSEC_HIDESYM
69471+ seq_printf(m, "<%p>", NULL);
69472+#else
69473 char symname[KSYM_NAME_LEN];
69474
69475 if (lookup_symbol_name(addr, symname) < 0)
69476 seq_printf(m, "<%p>", (void *)addr);
69477 else
69478 seq_printf(m, "%s", symname);
69479+#endif
69480 }
69481
69482 static int tstats_show(struct seq_file *m, void *v)
69483@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69484
69485 seq_puts(m, "Timer Stats Version: v0.2\n");
69486 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69487- if (atomic_read(&overflow_count))
69488+ if (atomic_read_unchecked(&overflow_count))
69489 seq_printf(m, "Overflow: %d entries\n",
69490- atomic_read(&overflow_count));
69491+ atomic_read_unchecked(&overflow_count));
69492
69493 for (i = 0; i < nr_entries; i++) {
69494 entry = entries + i;
69495@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69496 {
69497 struct proc_dir_entry *pe;
69498
69499+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69500+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69501+#else
69502 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69503+#endif
69504 if (!pe)
69505 return -ENOMEM;
69506 return 0;
69507diff --git a/kernel/timer.c b/kernel/timer.c
69508index a297ffc..5e16b0b 100644
69509--- a/kernel/timer.c
69510+++ b/kernel/timer.c
69511@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69512 /*
69513 * This function runs timers and the timer-tq in bottom half context.
69514 */
69515-static void run_timer_softirq(struct softirq_action *h)
69516+static void run_timer_softirq(void)
69517 {
69518 struct tvec_base *base = __this_cpu_read(tvec_bases);
69519
69520diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69521index c0bd030..62a1927 100644
69522--- a/kernel/trace/blktrace.c
69523+++ b/kernel/trace/blktrace.c
69524@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69525 struct blk_trace *bt = filp->private_data;
69526 char buf[16];
69527
69528- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69529+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69530
69531 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69532 }
69533@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69534 return 1;
69535
69536 bt = buf->chan->private_data;
69537- atomic_inc(&bt->dropped);
69538+ atomic_inc_unchecked(&bt->dropped);
69539 return 0;
69540 }
69541
69542@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69543
69544 bt->dir = dir;
69545 bt->dev = dev;
69546- atomic_set(&bt->dropped, 0);
69547+ atomic_set_unchecked(&bt->dropped, 0);
69548
69549 ret = -EIO;
69550 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69551diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69552index 0fa92f6..89950b2 100644
69553--- a/kernel/trace/ftrace.c
69554+++ b/kernel/trace/ftrace.c
69555@@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69556 if (unlikely(ftrace_disabled))
69557 return 0;
69558
69559+ ret = ftrace_arch_code_modify_prepare();
69560+ FTRACE_WARN_ON(ret);
69561+ if (ret)
69562+ return 0;
69563+
69564 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69565+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69566 if (ret) {
69567 ftrace_bug(ret, ip);
69568- return 0;
69569 }
69570- return 1;
69571+ return ret ? 0 : 1;
69572 }
69573
69574 /*
69575@@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69576
69577 int
69578 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69579- void *data)
69580+ void *data)
69581 {
69582 struct ftrace_func_probe *entry;
69583 struct ftrace_page *pg;
69584diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69585index 55e4d4c..8c915ec 100644
69586--- a/kernel/trace/trace.c
69587+++ b/kernel/trace/trace.c
69588@@ -4316,10 +4316,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69589 };
69590 #endif
69591
69592-static struct dentry *d_tracer;
69593-
69594 struct dentry *tracing_init_dentry(void)
69595 {
69596+ static struct dentry *d_tracer;
69597 static int once;
69598
69599 if (d_tracer)
69600@@ -4339,10 +4338,9 @@ struct dentry *tracing_init_dentry(void)
69601 return d_tracer;
69602 }
69603
69604-static struct dentry *d_percpu;
69605-
69606 struct dentry *tracing_dentry_percpu(void)
69607 {
69608+ static struct dentry *d_percpu;
69609 static int once;
69610 struct dentry *d_tracer;
69611
69612diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69613index 29111da..d190fe2 100644
69614--- a/kernel/trace/trace_events.c
69615+++ b/kernel/trace/trace_events.c
69616@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
69617 struct ftrace_module_file_ops {
69618 struct list_head list;
69619 struct module *mod;
69620- struct file_operations id;
69621- struct file_operations enable;
69622- struct file_operations format;
69623- struct file_operations filter;
69624 };
69625
69626 static struct ftrace_module_file_ops *
69627@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
69628
69629 file_ops->mod = mod;
69630
69631- file_ops->id = ftrace_event_id_fops;
69632- file_ops->id.owner = mod;
69633-
69634- file_ops->enable = ftrace_enable_fops;
69635- file_ops->enable.owner = mod;
69636-
69637- file_ops->filter = ftrace_event_filter_fops;
69638- file_ops->filter.owner = mod;
69639-
69640- file_ops->format = ftrace_event_format_fops;
69641- file_ops->format.owner = mod;
69642+ pax_open_kernel();
69643+ *(void **)&mod->trace_id.owner = mod;
69644+ *(void **)&mod->trace_enable.owner = mod;
69645+ *(void **)&mod->trace_filter.owner = mod;
69646+ *(void **)&mod->trace_format.owner = mod;
69647+ pax_close_kernel();
69648
69649 list_add(&file_ops->list, &ftrace_module_file_list);
69650
69651@@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
69652
69653 for_each_event(call, start, end) {
69654 __trace_add_event_call(*call, mod,
69655- &file_ops->id, &file_ops->enable,
69656- &file_ops->filter, &file_ops->format);
69657+ &mod->trace_id, &mod->trace_enable,
69658+ &mod->trace_filter, &mod->trace_format);
69659 }
69660 }
69661
69662diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69663index 580a05e..9b31acb 100644
69664--- a/kernel/trace/trace_kprobe.c
69665+++ b/kernel/trace/trace_kprobe.c
69666@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69667 long ret;
69668 int maxlen = get_rloc_len(*(u32 *)dest);
69669 u8 *dst = get_rloc_data(dest);
69670- u8 *src = addr;
69671+ const u8 __user *src = (const u8 __force_user *)addr;
69672 mm_segment_t old_fs = get_fs();
69673 if (!maxlen)
69674 return;
69675@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69676 pagefault_disable();
69677 do
69678 ret = __copy_from_user_inatomic(dst++, src++, 1);
69679- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69680+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69681 dst[-1] = '\0';
69682 pagefault_enable();
69683 set_fs(old_fs);
69684@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69685 ((u8 *)get_rloc_data(dest))[0] = '\0';
69686 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69687 } else
69688- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69689+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69690 get_rloc_offs(*(u32 *)dest));
69691 }
69692 /* Return the length of string -- including null terminal byte */
69693@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69694 set_fs(KERNEL_DS);
69695 pagefault_disable();
69696 do {
69697- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69698+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69699 len++;
69700 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69701 pagefault_enable();
69702diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69703index fd3c8aa..5f324a6 100644
69704--- a/kernel/trace/trace_mmiotrace.c
69705+++ b/kernel/trace/trace_mmiotrace.c
69706@@ -24,7 +24,7 @@ struct header_iter {
69707 static struct trace_array *mmio_trace_array;
69708 static bool overrun_detected;
69709 static unsigned long prev_overruns;
69710-static atomic_t dropped_count;
69711+static atomic_unchecked_t dropped_count;
69712
69713 static void mmio_reset_data(struct trace_array *tr)
69714 {
69715@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69716
69717 static unsigned long count_overruns(struct trace_iterator *iter)
69718 {
69719- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69720+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69721 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69722
69723 if (over > prev_overruns)
69724@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69725 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69726 sizeof(*entry), 0, pc);
69727 if (!event) {
69728- atomic_inc(&dropped_count);
69729+ atomic_inc_unchecked(&dropped_count);
69730 return;
69731 }
69732 entry = ring_buffer_event_data(event);
69733@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69734 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69735 sizeof(*entry), 0, pc);
69736 if (!event) {
69737- atomic_inc(&dropped_count);
69738+ atomic_inc_unchecked(&dropped_count);
69739 return;
69740 }
69741 entry = ring_buffer_event_data(event);
69742diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69743index df611a0..10d8b32 100644
69744--- a/kernel/trace/trace_output.c
69745+++ b/kernel/trace/trace_output.c
69746@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69747
69748 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69749 if (!IS_ERR(p)) {
69750- p = mangle_path(s->buffer + s->len, p, "\n");
69751+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69752 if (p) {
69753 s->len = p - s->buffer;
69754 return 1;
69755diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69756index d4545f4..a9010a1 100644
69757--- a/kernel/trace/trace_stack.c
69758+++ b/kernel/trace/trace_stack.c
69759@@ -53,7 +53,7 @@ static inline void check_stack(void)
69760 return;
69761
69762 /* we do not handle interrupt stacks yet */
69763- if (!object_is_on_stack(&this_size))
69764+ if (!object_starts_on_stack(&this_size))
69765 return;
69766
69767 local_irq_save(flags);
69768diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69769index 209b379..7f76423 100644
69770--- a/kernel/trace/trace_workqueue.c
69771+++ b/kernel/trace/trace_workqueue.c
69772@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69773 int cpu;
69774 pid_t pid;
69775 /* Can be inserted from interrupt or user context, need to be atomic */
69776- atomic_t inserted;
69777+ atomic_unchecked_t inserted;
69778 /*
69779 * Don't need to be atomic, works are serialized in a single workqueue thread
69780 * on a single CPU.
69781@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69782 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69783 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69784 if (node->pid == wq_thread->pid) {
69785- atomic_inc(&node->inserted);
69786+ atomic_inc_unchecked(&node->inserted);
69787 goto found;
69788 }
69789 }
69790@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69791 tsk = get_pid_task(pid, PIDTYPE_PID);
69792 if (tsk) {
69793 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69794- atomic_read(&cws->inserted), cws->executed,
69795+ atomic_read_unchecked(&cws->inserted), cws->executed,
69796 tsk->comm);
69797 put_task_struct(tsk);
69798 }
69799diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69800index 6777153..8519f60 100644
69801--- a/lib/Kconfig.debug
69802+++ b/lib/Kconfig.debug
69803@@ -1132,6 +1132,7 @@ config LATENCYTOP
69804 depends on DEBUG_KERNEL
69805 depends on STACKTRACE_SUPPORT
69806 depends on PROC_FS
69807+ depends on !GRKERNSEC_HIDESYM
69808 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69809 select KALLSYMS
69810 select KALLSYMS_ALL
69811diff --git a/lib/bitmap.c b/lib/bitmap.c
69812index b5a8b6a..a69623c 100644
69813--- a/lib/bitmap.c
69814+++ b/lib/bitmap.c
69815@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69816 {
69817 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69818 u32 chunk;
69819- const char __user __force *ubuf = (const char __user __force *)buf;
69820+ const char __user *ubuf = (const char __force_user *)buf;
69821
69822 bitmap_zero(maskp, nmaskbits);
69823
69824@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69825 {
69826 if (!access_ok(VERIFY_READ, ubuf, ulen))
69827 return -EFAULT;
69828- return __bitmap_parse((const char __force *)ubuf,
69829+ return __bitmap_parse((const char __force_kernel *)ubuf,
69830 ulen, 1, maskp, nmaskbits);
69831
69832 }
69833@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69834 {
69835 unsigned a, b;
69836 int c, old_c, totaldigits;
69837- const char __user __force *ubuf = (const char __user __force *)buf;
69838+ const char __user *ubuf = (const char __force_user *)buf;
69839 int exp_digit, in_range;
69840
69841 totaldigits = c = 0;
69842@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69843 {
69844 if (!access_ok(VERIFY_READ, ubuf, ulen))
69845 return -EFAULT;
69846- return __bitmap_parselist((const char __force *)ubuf,
69847+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69848 ulen, 1, maskp, nmaskbits);
69849 }
69850 EXPORT_SYMBOL(bitmap_parselist_user);
69851diff --git a/lib/bug.c b/lib/bug.c
69852index a28c141..2bd3d95 100644
69853--- a/lib/bug.c
69854+++ b/lib/bug.c
69855@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69856 return BUG_TRAP_TYPE_NONE;
69857
69858 bug = find_bug(bugaddr);
69859+ if (!bug)
69860+ return BUG_TRAP_TYPE_NONE;
69861
69862 file = NULL;
69863 line = 0;
69864diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69865index 0ab9ae8..f01ceca 100644
69866--- a/lib/debugobjects.c
69867+++ b/lib/debugobjects.c
69868@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69869 if (limit > 4)
69870 return;
69871
69872- is_on_stack = object_is_on_stack(addr);
69873+ is_on_stack = object_starts_on_stack(addr);
69874 if (is_on_stack == onstack)
69875 return;
69876
69877diff --git a/lib/devres.c b/lib/devres.c
69878index 80b9c76..9e32279 100644
69879--- a/lib/devres.c
69880+++ b/lib/devres.c
69881@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69882 void devm_iounmap(struct device *dev, void __iomem *addr)
69883 {
69884 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69885- (void *)addr));
69886+ (void __force *)addr));
69887 iounmap(addr);
69888 }
69889 EXPORT_SYMBOL(devm_iounmap);
69890@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69891 {
69892 ioport_unmap(addr);
69893 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69894- devm_ioport_map_match, (void *)addr));
69895+ devm_ioport_map_match, (void __force *)addr));
69896 }
69897 EXPORT_SYMBOL(devm_ioport_unmap);
69898
69899diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69900index 13ef233..5241683 100644
69901--- a/lib/dma-debug.c
69902+++ b/lib/dma-debug.c
69903@@ -924,7 +924,7 @@ out:
69904
69905 static void check_for_stack(struct device *dev, void *addr)
69906 {
69907- if (object_is_on_stack(addr))
69908+ if (object_starts_on_stack(addr))
69909 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69910 "stack [addr=%p]\n", addr);
69911 }
69912diff --git a/lib/extable.c b/lib/extable.c
69913index 4cac81e..63e9b8f 100644
69914--- a/lib/extable.c
69915+++ b/lib/extable.c
69916@@ -13,6 +13,7 @@
69917 #include <linux/init.h>
69918 #include <linux/sort.h>
69919 #include <asm/uaccess.h>
69920+#include <asm/pgtable.h>
69921
69922 #ifndef ARCH_HAS_SORT_EXTABLE
69923 /*
69924@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69925 void sort_extable(struct exception_table_entry *start,
69926 struct exception_table_entry *finish)
69927 {
69928+ pax_open_kernel();
69929 sort(start, finish - start, sizeof(struct exception_table_entry),
69930 cmp_ex, NULL);
69931+ pax_close_kernel();
69932 }
69933
69934 #ifdef CONFIG_MODULES
69935diff --git a/lib/inflate.c b/lib/inflate.c
69936index 013a761..c28f3fc 100644
69937--- a/lib/inflate.c
69938+++ b/lib/inflate.c
69939@@ -269,7 +269,7 @@ static void free(void *where)
69940 malloc_ptr = free_mem_ptr;
69941 }
69942 #else
69943-#define malloc(a) kmalloc(a, GFP_KERNEL)
69944+#define malloc(a) kmalloc((a), GFP_KERNEL)
69945 #define free(a) kfree(a)
69946 #endif
69947
69948diff --git a/lib/ioremap.c b/lib/ioremap.c
69949index 0c9216c..863bd89 100644
69950--- a/lib/ioremap.c
69951+++ b/lib/ioremap.c
69952@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69953 unsigned long next;
69954
69955 phys_addr -= addr;
69956- pmd = pmd_alloc(&init_mm, pud, addr);
69957+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69958 if (!pmd)
69959 return -ENOMEM;
69960 do {
69961@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69962 unsigned long next;
69963
69964 phys_addr -= addr;
69965- pud = pud_alloc(&init_mm, pgd, addr);
69966+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
69967 if (!pud)
69968 return -ENOMEM;
69969 do {
69970diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69971index bd2bea9..6b3c95e 100644
69972--- a/lib/is_single_threaded.c
69973+++ b/lib/is_single_threaded.c
69974@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69975 struct task_struct *p, *t;
69976 bool ret;
69977
69978+ if (!mm)
69979+ return true;
69980+
69981 if (atomic_read(&task->signal->live) != 1)
69982 return false;
69983
69984diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69985index 3ac50dc..240bb7e 100644
69986--- a/lib/radix-tree.c
69987+++ b/lib/radix-tree.c
69988@@ -79,7 +79,7 @@ struct radix_tree_preload {
69989 int nr;
69990 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69991 };
69992-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69993+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69994
69995 static inline void *ptr_to_indirect(void *ptr)
69996 {
69997diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69998index abbabec..d5eba6c 100644
69999--- a/lib/vsprintf.c
70000+++ b/lib/vsprintf.c
70001@@ -16,6 +16,9 @@
70002 * - scnprintf and vscnprintf
70003 */
70004
70005+#ifdef CONFIG_GRKERNSEC_HIDESYM
70006+#define __INCLUDED_BY_HIDESYM 1
70007+#endif
70008 #include <stdarg.h>
70009 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
70010 #include <linux/types.h>
70011@@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70012 char sym[KSYM_SYMBOL_LEN];
70013 if (ext == 'B')
70014 sprint_backtrace(sym, value);
70015- else if (ext != 'f' && ext != 's')
70016+ else if (ext != 'f' && ext != 's' && ext != 'a')
70017 sprint_symbol(sym, value);
70018 else
70019 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70020@@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70021 return number(buf, end, *(const netdev_features_t *)addr, spec);
70022 }
70023
70024+#ifdef CONFIG_GRKERNSEC_HIDESYM
70025+int kptr_restrict __read_mostly = 2;
70026+#else
70027 int kptr_restrict __read_mostly;
70028+#endif
70029
70030 /*
70031 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70032@@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
70033 * - 'S' For symbolic direct pointers with offset
70034 * - 's' For symbolic direct pointers without offset
70035 * - 'B' For backtraced symbolic direct pointers with offset
70036+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70037+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70038 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70039 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70040 * - 'M' For a 6-byte MAC address, it prints the address in the
70041@@ -866,14 +875,25 @@ static noinline_for_stack
70042 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70043 struct printf_spec spec)
70044 {
70045+#ifdef CONFIG_GRKERNSEC_HIDESYM
70046+ /* 'P' = approved pointers to copy to userland,
70047+ as in the /proc/kallsyms case, as we make it display nothing
70048+ for non-root users, and the real contents for root users
70049+ */
70050+ if (ptr > TASK_SIZE && *fmt != 'P' && is_usercopy_object(buf)) {
70051+ ptr = NULL;
70052+ goto simple;
70053+ }
70054+#endif
70055+
70056 if (!ptr && *fmt != 'K') {
70057 /*
70058- * Print (null) with the same width as a pointer so it makes
70059+ * Print (nil) with the same width as a pointer so it makes
70060 * tabular output look nice.
70061 */
70062 if (spec.field_width == -1)
70063 spec.field_width = 2 * sizeof(void *);
70064- return string(buf, end, "(null)", spec);
70065+ return string(buf, end, "(nil)", spec);
70066 }
70067
70068 switch (*fmt) {
70069@@ -883,6 +903,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70070 /* Fallthrough */
70071 case 'S':
70072 case 's':
70073+#ifdef CONFIG_GRKERNSEC_HIDESYM
70074+ break;
70075+#else
70076+ return symbol_string(buf, end, ptr, spec, *fmt);
70077+#endif
70078+ case 'A':
70079+ case 'a':
70080 case 'B':
70081 return symbol_string(buf, end, ptr, spec, *fmt);
70082 case 'R':
70083@@ -920,6 +947,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70084 va_end(va);
70085 return buf;
70086 }
70087+ case 'P':
70088+ break;
70089 case 'K':
70090 /*
70091 * %pK cannot be used in IRQ context because its test
70092@@ -942,6 +971,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70093 }
70094 break;
70095 }
70096+#ifdef CONFIG_GRKERNSEC_HIDESYM
70097+simple:
70098+#endif
70099 spec.flags |= SMALL;
70100 if (spec.field_width == -1) {
70101 spec.field_width = 2 * sizeof(void *);
70102@@ -1653,11 +1685,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70103 typeof(type) value; \
70104 if (sizeof(type) == 8) { \
70105 args = PTR_ALIGN(args, sizeof(u32)); \
70106- *(u32 *)&value = *(u32 *)args; \
70107- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70108+ *(u32 *)&value = *(const u32 *)args; \
70109+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70110 } else { \
70111 args = PTR_ALIGN(args, sizeof(type)); \
70112- value = *(typeof(type) *)args; \
70113+ value = *(const typeof(type) *)args; \
70114 } \
70115 args += sizeof(type); \
70116 value; \
70117@@ -1720,7 +1752,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70118 case FORMAT_TYPE_STR: {
70119 const char *str_arg = args;
70120 args += strlen(str_arg) + 1;
70121- str = string(str, end, (char *)str_arg, spec);
70122+ str = string(str, end, str_arg, spec);
70123 break;
70124 }
70125
70126diff --git a/localversion-grsec b/localversion-grsec
70127new file mode 100644
70128index 0000000..7cd6065
70129--- /dev/null
70130+++ b/localversion-grsec
70131@@ -0,0 +1 @@
70132+-grsec
70133diff --git a/mm/Kconfig b/mm/Kconfig
70134index e338407..4210331 100644
70135--- a/mm/Kconfig
70136+++ b/mm/Kconfig
70137@@ -247,10 +247,10 @@ config KSM
70138 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70139
70140 config DEFAULT_MMAP_MIN_ADDR
70141- int "Low address space to protect from user allocation"
70142+ int "Low address space to protect from user allocation"
70143 depends on MMU
70144- default 4096
70145- help
70146+ default 65536
70147+ help
70148 This is the portion of low virtual memory which should be protected
70149 from userspace allocation. Keeping a user from writing to low pages
70150 can help reduce the impact of kernel NULL pointer bugs.
70151@@ -280,7 +280,7 @@ config MEMORY_FAILURE
70152
70153 config HWPOISON_INJECT
70154 tristate "HWPoison pages injector"
70155- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
70156+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
70157 select PROC_PAGE_MONITOR
70158
70159 config NOMMU_INITIAL_TRIM_EXCESS
70160diff --git a/mm/filemap.c b/mm/filemap.c
70161index 79c4b2b..596b417 100644
70162--- a/mm/filemap.c
70163+++ b/mm/filemap.c
70164@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70165 struct address_space *mapping = file->f_mapping;
70166
70167 if (!mapping->a_ops->readpage)
70168- return -ENOEXEC;
70169+ return -ENODEV;
70170 file_accessed(file);
70171 vma->vm_ops = &generic_file_vm_ops;
70172 vma->vm_flags |= VM_CAN_NONLINEAR;
70173@@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70174 *pos = i_size_read(inode);
70175
70176 if (limit != RLIM_INFINITY) {
70177+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70178 if (*pos >= limit) {
70179 send_sig(SIGXFSZ, current, 0);
70180 return -EFBIG;
70181diff --git a/mm/fremap.c b/mm/fremap.c
70182index 9ed4fd4..c42648d 100644
70183--- a/mm/fremap.c
70184+++ b/mm/fremap.c
70185@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70186 retry:
70187 vma = find_vma(mm, start);
70188
70189+#ifdef CONFIG_PAX_SEGMEXEC
70190+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70191+ goto out;
70192+#endif
70193+
70194 /*
70195 * Make sure the vma is shared, that it supports prefaulting,
70196 * and that the remapped range is valid and fully within
70197diff --git a/mm/highmem.c b/mm/highmem.c
70198index 57d82c6..e9e0552 100644
70199--- a/mm/highmem.c
70200+++ b/mm/highmem.c
70201@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70202 * So no dangers, even with speculative execution.
70203 */
70204 page = pte_page(pkmap_page_table[i]);
70205+ pax_open_kernel();
70206 pte_clear(&init_mm, (unsigned long)page_address(page),
70207 &pkmap_page_table[i]);
70208-
70209+ pax_close_kernel();
70210 set_page_address(page, NULL);
70211 need_flush = 1;
70212 }
70213@@ -186,9 +187,11 @@ start:
70214 }
70215 }
70216 vaddr = PKMAP_ADDR(last_pkmap_nr);
70217+
70218+ pax_open_kernel();
70219 set_pte_at(&init_mm, vaddr,
70220 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70221-
70222+ pax_close_kernel();
70223 pkmap_count[last_pkmap_nr] = 1;
70224 set_page_address(page, (void *)vaddr);
70225
70226diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70227index f0e5306..cb9398e 100644
70228--- a/mm/huge_memory.c
70229+++ b/mm/huge_memory.c
70230@@ -733,7 +733,7 @@ out:
70231 * run pte_offset_map on the pmd, if an huge pmd could
70232 * materialize from under us from a different thread.
70233 */
70234- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70235+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70236 return VM_FAULT_OOM;
70237 /* if an huge pmd materialized from under us just retry later */
70238 if (unlikely(pmd_trans_huge(*pmd)))
70239diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70240index 263e177..3f36aec 100644
70241--- a/mm/hugetlb.c
70242+++ b/mm/hugetlb.c
70243@@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70244 return 1;
70245 }
70246
70247+#ifdef CONFIG_PAX_SEGMEXEC
70248+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70249+{
70250+ struct mm_struct *mm = vma->vm_mm;
70251+ struct vm_area_struct *vma_m;
70252+ unsigned long address_m;
70253+ pte_t *ptep_m;
70254+
70255+ vma_m = pax_find_mirror_vma(vma);
70256+ if (!vma_m)
70257+ return;
70258+
70259+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70260+ address_m = address + SEGMEXEC_TASK_SIZE;
70261+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70262+ get_page(page_m);
70263+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
70264+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70265+}
70266+#endif
70267+
70268 /*
70269 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70270 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70271@@ -2558,6 +2579,11 @@ retry_avoidcopy:
70272 make_huge_pte(vma, new_page, 1));
70273 page_remove_rmap(old_page);
70274 hugepage_add_new_anon_rmap(new_page, vma, address);
70275+
70276+#ifdef CONFIG_PAX_SEGMEXEC
70277+ pax_mirror_huge_pte(vma, address, new_page);
70278+#endif
70279+
70280 /* Make the old page be freed below */
70281 new_page = old_page;
70282 mmu_notifier_invalidate_range_end(mm,
70283@@ -2712,6 +2738,10 @@ retry:
70284 && (vma->vm_flags & VM_SHARED)));
70285 set_huge_pte_at(mm, address, ptep, new_pte);
70286
70287+#ifdef CONFIG_PAX_SEGMEXEC
70288+ pax_mirror_huge_pte(vma, address, page);
70289+#endif
70290+
70291 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70292 /* Optimization, do the COW without a second fault */
70293 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70294@@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70295 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70296 struct hstate *h = hstate_vma(vma);
70297
70298+#ifdef CONFIG_PAX_SEGMEXEC
70299+ struct vm_area_struct *vma_m;
70300+#endif
70301+
70302 address &= huge_page_mask(h);
70303
70304 ptep = huge_pte_offset(mm, address);
70305@@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70306 VM_FAULT_SET_HINDEX(h - hstates);
70307 }
70308
70309+#ifdef CONFIG_PAX_SEGMEXEC
70310+ vma_m = pax_find_mirror_vma(vma);
70311+ if (vma_m) {
70312+ unsigned long address_m;
70313+
70314+ if (vma->vm_start > vma_m->vm_start) {
70315+ address_m = address;
70316+ address -= SEGMEXEC_TASK_SIZE;
70317+ vma = vma_m;
70318+ h = hstate_vma(vma);
70319+ } else
70320+ address_m = address + SEGMEXEC_TASK_SIZE;
70321+
70322+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70323+ return VM_FAULT_OOM;
70324+ address_m &= HPAGE_MASK;
70325+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70326+ }
70327+#endif
70328+
70329 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70330 if (!ptep)
70331 return VM_FAULT_OOM;
70332diff --git a/mm/internal.h b/mm/internal.h
70333index 2189af4..f2ca332 100644
70334--- a/mm/internal.h
70335+++ b/mm/internal.h
70336@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70337 * in mm/page_alloc.c
70338 */
70339 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70340+extern void free_compound_page(struct page *page);
70341 extern void prep_compound_page(struct page *page, unsigned long order);
70342 #ifdef CONFIG_MEMORY_FAILURE
70343 extern bool is_free_buddy_page(struct page *page);
70344diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70345index 45eb621..6ccd8ea 100644
70346--- a/mm/kmemleak.c
70347+++ b/mm/kmemleak.c
70348@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70349
70350 for (i = 0; i < object->trace_len; i++) {
70351 void *ptr = (void *)object->trace[i];
70352- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70353+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70354 }
70355 }
70356
70357diff --git a/mm/maccess.c b/mm/maccess.c
70358index d53adf9..03a24bf 100644
70359--- a/mm/maccess.c
70360+++ b/mm/maccess.c
70361@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70362 set_fs(KERNEL_DS);
70363 pagefault_disable();
70364 ret = __copy_from_user_inatomic(dst,
70365- (__force const void __user *)src, size);
70366+ (const void __force_user *)src, size);
70367 pagefault_enable();
70368 set_fs(old_fs);
70369
70370@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70371
70372 set_fs(KERNEL_DS);
70373 pagefault_disable();
70374- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70375+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70376 pagefault_enable();
70377 set_fs(old_fs);
70378
70379diff --git a/mm/madvise.c b/mm/madvise.c
70380index 55f645c..cde5320 100644
70381--- a/mm/madvise.c
70382+++ b/mm/madvise.c
70383@@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70384 pgoff_t pgoff;
70385 unsigned long new_flags = vma->vm_flags;
70386
70387+#ifdef CONFIG_PAX_SEGMEXEC
70388+ struct vm_area_struct *vma_m;
70389+#endif
70390+
70391 switch (behavior) {
70392 case MADV_NORMAL:
70393 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70394@@ -117,6 +121,13 @@ success:
70395 /*
70396 * vm_flags is protected by the mmap_sem held in write mode.
70397 */
70398+
70399+#ifdef CONFIG_PAX_SEGMEXEC
70400+ vma_m = pax_find_mirror_vma(vma);
70401+ if (vma_m)
70402+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70403+#endif
70404+
70405 vma->vm_flags = new_flags;
70406
70407 out:
70408@@ -175,6 +186,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70409 struct vm_area_struct ** prev,
70410 unsigned long start, unsigned long end)
70411 {
70412+
70413+#ifdef CONFIG_PAX_SEGMEXEC
70414+ struct vm_area_struct *vma_m;
70415+#endif
70416+
70417 *prev = vma;
70418 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70419 return -EINVAL;
70420@@ -187,6 +203,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70421 zap_page_range(vma, start, end - start, &details);
70422 } else
70423 zap_page_range(vma, start, end - start, NULL);
70424+
70425+#ifdef CONFIG_PAX_SEGMEXEC
70426+ vma_m = pax_find_mirror_vma(vma);
70427+ if (vma_m) {
70428+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70429+ struct zap_details details = {
70430+ .nonlinear_vma = vma_m,
70431+ .last_index = ULONG_MAX,
70432+ };
70433+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70434+ } else
70435+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70436+ }
70437+#endif
70438+
70439 return 0;
70440 }
70441
70442@@ -394,6 +425,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70443 if (end < start)
70444 goto out;
70445
70446+#ifdef CONFIG_PAX_SEGMEXEC
70447+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70448+ if (end > SEGMEXEC_TASK_SIZE)
70449+ goto out;
70450+ } else
70451+#endif
70452+
70453+ if (end > TASK_SIZE)
70454+ goto out;
70455+
70456 error = 0;
70457 if (end == start)
70458 goto out;
70459diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70460index 97cc273..6ed703f 100644
70461--- a/mm/memory-failure.c
70462+++ b/mm/memory-failure.c
70463@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70464
70465 int sysctl_memory_failure_recovery __read_mostly = 1;
70466
70467-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70468+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70469
70470 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70471
70472@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
70473 pfn, t->comm, t->pid);
70474 si.si_signo = SIGBUS;
70475 si.si_errno = 0;
70476- si.si_addr = (void *)addr;
70477+ si.si_addr = (void __user *)addr;
70478 #ifdef __ARCH_SI_TRAPNO
70479 si.si_trapno = trapno;
70480 #endif
70481@@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70482 }
70483
70484 nr_pages = 1 << compound_trans_order(hpage);
70485- atomic_long_add(nr_pages, &mce_bad_pages);
70486+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70487
70488 /*
70489 * We need/can do nothing about count=0 pages.
70490@@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70491 if (!PageHWPoison(hpage)
70492 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70493 || (p != hpage && TestSetPageHWPoison(hpage))) {
70494- atomic_long_sub(nr_pages, &mce_bad_pages);
70495+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70496 return 0;
70497 }
70498 set_page_hwpoison_huge_page(hpage);
70499@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70500 }
70501 if (hwpoison_filter(p)) {
70502 if (TestClearPageHWPoison(p))
70503- atomic_long_sub(nr_pages, &mce_bad_pages);
70504+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70505 unlock_page(hpage);
70506 put_page(hpage);
70507 return 0;
70508@@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
70509 return 0;
70510 }
70511 if (TestClearPageHWPoison(p))
70512- atomic_long_sub(nr_pages, &mce_bad_pages);
70513+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70514 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70515 return 0;
70516 }
70517@@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
70518 */
70519 if (TestClearPageHWPoison(page)) {
70520 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70521- atomic_long_sub(nr_pages, &mce_bad_pages);
70522+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70523 freeit = 1;
70524 if (PageHuge(page))
70525 clear_page_hwpoison_huge_page(page);
70526@@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70527 }
70528 done:
70529 if (!PageHWPoison(hpage))
70530- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70531+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70532 set_page_hwpoison_huge_page(hpage);
70533 dequeue_hwpoisoned_huge_page(hpage);
70534 /* keep elevated page count for bad page */
70535@@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
70536 return ret;
70537
70538 done:
70539- atomic_long_add(1, &mce_bad_pages);
70540+ atomic_long_add_unchecked(1, &mce_bad_pages);
70541 SetPageHWPoison(page);
70542 /* keep elevated page count for bad page */
70543 return ret;
70544diff --git a/mm/memory.c b/mm/memory.c
70545index 6105f47..3363489 100644
70546--- a/mm/memory.c
70547+++ b/mm/memory.c
70548@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70549 return;
70550
70551 pmd = pmd_offset(pud, start);
70552+
70553+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70554 pud_clear(pud);
70555 pmd_free_tlb(tlb, pmd, start);
70556+#endif
70557+
70558 }
70559
70560 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70561@@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70562 if (end - 1 > ceiling - 1)
70563 return;
70564
70565+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70566 pud = pud_offset(pgd, start);
70567 pgd_clear(pgd);
70568 pud_free_tlb(tlb, pud, start);
70569+#endif
70570+
70571 }
70572
70573 /*
70574@@ -1597,12 +1604,6 @@ no_page_table:
70575 return page;
70576 }
70577
70578-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70579-{
70580- return stack_guard_page_start(vma, addr) ||
70581- stack_guard_page_end(vma, addr+PAGE_SIZE);
70582-}
70583-
70584 /**
70585 * __get_user_pages() - pin user pages in memory
70586 * @tsk: task_struct of target task
70587@@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70588 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70589 i = 0;
70590
70591- do {
70592+ while (nr_pages) {
70593 struct vm_area_struct *vma;
70594
70595- vma = find_extend_vma(mm, start);
70596+ vma = find_vma(mm, start);
70597 if (!vma && in_gate_area(mm, start)) {
70598 unsigned long pg = start & PAGE_MASK;
70599 pgd_t *pgd;
70600@@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70601 goto next_page;
70602 }
70603
70604- if (!vma ||
70605+ if (!vma || start < vma->vm_start ||
70606 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70607 !(vm_flags & vma->vm_flags))
70608 return i ? : -EFAULT;
70609@@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70610 int ret;
70611 unsigned int fault_flags = 0;
70612
70613- /* For mlock, just skip the stack guard page. */
70614- if (foll_flags & FOLL_MLOCK) {
70615- if (stack_guard_page(vma, start))
70616- goto next_page;
70617- }
70618 if (foll_flags & FOLL_WRITE)
70619 fault_flags |= FAULT_FLAG_WRITE;
70620 if (nonblocking)
70621@@ -1831,7 +1827,7 @@ next_page:
70622 start += PAGE_SIZE;
70623 nr_pages--;
70624 } while (nr_pages && start < vma->vm_end);
70625- } while (nr_pages);
70626+ }
70627 return i;
70628 }
70629 EXPORT_SYMBOL(__get_user_pages);
70630@@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70631 page_add_file_rmap(page);
70632 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70633
70634+#ifdef CONFIG_PAX_SEGMEXEC
70635+ pax_mirror_file_pte(vma, addr, page, ptl);
70636+#endif
70637+
70638 retval = 0;
70639 pte_unmap_unlock(pte, ptl);
70640 return retval;
70641@@ -2072,10 +2072,22 @@ out:
70642 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70643 struct page *page)
70644 {
70645+
70646+#ifdef CONFIG_PAX_SEGMEXEC
70647+ struct vm_area_struct *vma_m;
70648+#endif
70649+
70650 if (addr < vma->vm_start || addr >= vma->vm_end)
70651 return -EFAULT;
70652 if (!page_count(page))
70653 return -EINVAL;
70654+
70655+#ifdef CONFIG_PAX_SEGMEXEC
70656+ vma_m = pax_find_mirror_vma(vma);
70657+ if (vma_m)
70658+ vma_m->vm_flags |= VM_INSERTPAGE;
70659+#endif
70660+
70661 vma->vm_flags |= VM_INSERTPAGE;
70662 return insert_page(vma, addr, page, vma->vm_page_prot);
70663 }
70664@@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70665 unsigned long pfn)
70666 {
70667 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70668+ BUG_ON(vma->vm_mirror);
70669
70670 if (addr < vma->vm_start || addr >= vma->vm_end)
70671 return -EFAULT;
70672@@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
70673
70674 BUG_ON(pud_huge(*pud));
70675
70676- pmd = pmd_alloc(mm, pud, addr);
70677+ pmd = (mm == &init_mm) ?
70678+ pmd_alloc_kernel(mm, pud, addr) :
70679+ pmd_alloc(mm, pud, addr);
70680 if (!pmd)
70681 return -ENOMEM;
70682 do {
70683@@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
70684 unsigned long next;
70685 int err;
70686
70687- pud = pud_alloc(mm, pgd, addr);
70688+ pud = (mm == &init_mm) ?
70689+ pud_alloc_kernel(mm, pgd, addr) :
70690+ pud_alloc(mm, pgd, addr);
70691 if (!pud)
70692 return -ENOMEM;
70693 do {
70694@@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70695 copy_user_highpage(dst, src, va, vma);
70696 }
70697
70698+#ifdef CONFIG_PAX_SEGMEXEC
70699+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70700+{
70701+ struct mm_struct *mm = vma->vm_mm;
70702+ spinlock_t *ptl;
70703+ pte_t *pte, entry;
70704+
70705+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70706+ entry = *pte;
70707+ if (!pte_present(entry)) {
70708+ if (!pte_none(entry)) {
70709+ BUG_ON(pte_file(entry));
70710+ free_swap_and_cache(pte_to_swp_entry(entry));
70711+ pte_clear_not_present_full(mm, address, pte, 0);
70712+ }
70713+ } else {
70714+ struct page *page;
70715+
70716+ flush_cache_page(vma, address, pte_pfn(entry));
70717+ entry = ptep_clear_flush(vma, address, pte);
70718+ BUG_ON(pte_dirty(entry));
70719+ page = vm_normal_page(vma, address, entry);
70720+ if (page) {
70721+ update_hiwater_rss(mm);
70722+ if (PageAnon(page))
70723+ dec_mm_counter_fast(mm, MM_ANONPAGES);
70724+ else
70725+ dec_mm_counter_fast(mm, MM_FILEPAGES);
70726+ page_remove_rmap(page);
70727+ page_cache_release(page);
70728+ }
70729+ }
70730+ pte_unmap_unlock(pte, ptl);
70731+}
70732+
70733+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70734+ *
70735+ * the ptl of the lower mapped page is held on entry and is not released on exit
70736+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70737+ */
70738+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70739+{
70740+ struct mm_struct *mm = vma->vm_mm;
70741+ unsigned long address_m;
70742+ spinlock_t *ptl_m;
70743+ struct vm_area_struct *vma_m;
70744+ pmd_t *pmd_m;
70745+ pte_t *pte_m, entry_m;
70746+
70747+ BUG_ON(!page_m || !PageAnon(page_m));
70748+
70749+ vma_m = pax_find_mirror_vma(vma);
70750+ if (!vma_m)
70751+ return;
70752+
70753+ BUG_ON(!PageLocked(page_m));
70754+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70755+ address_m = address + SEGMEXEC_TASK_SIZE;
70756+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70757+ pte_m = pte_offset_map(pmd_m, address_m);
70758+ ptl_m = pte_lockptr(mm, pmd_m);
70759+ if (ptl != ptl_m) {
70760+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70761+ if (!pte_none(*pte_m))
70762+ goto out;
70763+ }
70764+
70765+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70766+ page_cache_get(page_m);
70767+ page_add_anon_rmap(page_m, vma_m, address_m);
70768+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70769+ set_pte_at(mm, address_m, pte_m, entry_m);
70770+ update_mmu_cache(vma_m, address_m, entry_m);
70771+out:
70772+ if (ptl != ptl_m)
70773+ spin_unlock(ptl_m);
70774+ pte_unmap(pte_m);
70775+ unlock_page(page_m);
70776+}
70777+
70778+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70779+{
70780+ struct mm_struct *mm = vma->vm_mm;
70781+ unsigned long address_m;
70782+ spinlock_t *ptl_m;
70783+ struct vm_area_struct *vma_m;
70784+ pmd_t *pmd_m;
70785+ pte_t *pte_m, entry_m;
70786+
70787+ BUG_ON(!page_m || PageAnon(page_m));
70788+
70789+ vma_m = pax_find_mirror_vma(vma);
70790+ if (!vma_m)
70791+ return;
70792+
70793+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70794+ address_m = address + SEGMEXEC_TASK_SIZE;
70795+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70796+ pte_m = pte_offset_map(pmd_m, address_m);
70797+ ptl_m = pte_lockptr(mm, pmd_m);
70798+ if (ptl != ptl_m) {
70799+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70800+ if (!pte_none(*pte_m))
70801+ goto out;
70802+ }
70803+
70804+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70805+ page_cache_get(page_m);
70806+ page_add_file_rmap(page_m);
70807+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70808+ set_pte_at(mm, address_m, pte_m, entry_m);
70809+ update_mmu_cache(vma_m, address_m, entry_m);
70810+out:
70811+ if (ptl != ptl_m)
70812+ spin_unlock(ptl_m);
70813+ pte_unmap(pte_m);
70814+}
70815+
70816+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70817+{
70818+ struct mm_struct *mm = vma->vm_mm;
70819+ unsigned long address_m;
70820+ spinlock_t *ptl_m;
70821+ struct vm_area_struct *vma_m;
70822+ pmd_t *pmd_m;
70823+ pte_t *pte_m, entry_m;
70824+
70825+ vma_m = pax_find_mirror_vma(vma);
70826+ if (!vma_m)
70827+ return;
70828+
70829+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70830+ address_m = address + SEGMEXEC_TASK_SIZE;
70831+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70832+ pte_m = pte_offset_map(pmd_m, address_m);
70833+ ptl_m = pte_lockptr(mm, pmd_m);
70834+ if (ptl != ptl_m) {
70835+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70836+ if (!pte_none(*pte_m))
70837+ goto out;
70838+ }
70839+
70840+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70841+ set_pte_at(mm, address_m, pte_m, entry_m);
70842+out:
70843+ if (ptl != ptl_m)
70844+ spin_unlock(ptl_m);
70845+ pte_unmap(pte_m);
70846+}
70847+
70848+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70849+{
70850+ struct page *page_m;
70851+ pte_t entry;
70852+
70853+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70854+ goto out;
70855+
70856+ entry = *pte;
70857+ page_m = vm_normal_page(vma, address, entry);
70858+ if (!page_m)
70859+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70860+ else if (PageAnon(page_m)) {
70861+ if (pax_find_mirror_vma(vma)) {
70862+ pte_unmap_unlock(pte, ptl);
70863+ lock_page(page_m);
70864+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70865+ if (pte_same(entry, *pte))
70866+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70867+ else
70868+ unlock_page(page_m);
70869+ }
70870+ } else
70871+ pax_mirror_file_pte(vma, address, page_m, ptl);
70872+
70873+out:
70874+ pte_unmap_unlock(pte, ptl);
70875+}
70876+#endif
70877+
70878 /*
70879 * This routine handles present pages, when users try to write
70880 * to a shared page. It is done by copying the page to a new address
70881@@ -2687,6 +2884,12 @@ gotten:
70882 */
70883 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70884 if (likely(pte_same(*page_table, orig_pte))) {
70885+
70886+#ifdef CONFIG_PAX_SEGMEXEC
70887+ if (pax_find_mirror_vma(vma))
70888+ BUG_ON(!trylock_page(new_page));
70889+#endif
70890+
70891 if (old_page) {
70892 if (!PageAnon(old_page)) {
70893 dec_mm_counter_fast(mm, MM_FILEPAGES);
70894@@ -2738,6 +2941,10 @@ gotten:
70895 page_remove_rmap(old_page);
70896 }
70897
70898+#ifdef CONFIG_PAX_SEGMEXEC
70899+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70900+#endif
70901+
70902 /* Free the old page.. */
70903 new_page = old_page;
70904 ret |= VM_FAULT_WRITE;
70905@@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70906 swap_free(entry);
70907 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70908 try_to_free_swap(page);
70909+
70910+#ifdef CONFIG_PAX_SEGMEXEC
70911+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70912+#endif
70913+
70914 unlock_page(page);
70915 if (swapcache) {
70916 /*
70917@@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70918
70919 /* No need to invalidate - it was non-present before */
70920 update_mmu_cache(vma, address, page_table);
70921+
70922+#ifdef CONFIG_PAX_SEGMEXEC
70923+ pax_mirror_anon_pte(vma, address, page, ptl);
70924+#endif
70925+
70926 unlock:
70927 pte_unmap_unlock(page_table, ptl);
70928 out:
70929@@ -3059,40 +3276,6 @@ out_release:
70930 }
70931
70932 /*
70933- * This is like a special single-page "expand_{down|up}wards()",
70934- * except we must first make sure that 'address{-|+}PAGE_SIZE'
70935- * doesn't hit another vma.
70936- */
70937-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70938-{
70939- address &= PAGE_MASK;
70940- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70941- struct vm_area_struct *prev = vma->vm_prev;
70942-
70943- /*
70944- * Is there a mapping abutting this one below?
70945- *
70946- * That's only ok if it's the same stack mapping
70947- * that has gotten split..
70948- */
70949- if (prev && prev->vm_end == address)
70950- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70951-
70952- expand_downwards(vma, address - PAGE_SIZE);
70953- }
70954- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70955- struct vm_area_struct *next = vma->vm_next;
70956-
70957- /* As VM_GROWSDOWN but s/below/above/ */
70958- if (next && next->vm_start == address + PAGE_SIZE)
70959- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70960-
70961- expand_upwards(vma, address + PAGE_SIZE);
70962- }
70963- return 0;
70964-}
70965-
70966-/*
70967 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70968 * but allow concurrent faults), and pte mapped but not yet locked.
70969 * We return with mmap_sem still held, but pte unmapped and unlocked.
70970@@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70971 unsigned long address, pte_t *page_table, pmd_t *pmd,
70972 unsigned int flags)
70973 {
70974- struct page *page;
70975+ struct page *page = NULL;
70976 spinlock_t *ptl;
70977 pte_t entry;
70978
70979- pte_unmap(page_table);
70980-
70981- /* Check if we need to add a guard page to the stack */
70982- if (check_stack_guard_page(vma, address) < 0)
70983- return VM_FAULT_SIGBUS;
70984-
70985- /* Use the zero-page for reads */
70986 if (!(flags & FAULT_FLAG_WRITE)) {
70987 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70988 vma->vm_page_prot));
70989- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70990+ ptl = pte_lockptr(mm, pmd);
70991+ spin_lock(ptl);
70992 if (!pte_none(*page_table))
70993 goto unlock;
70994 goto setpte;
70995 }
70996
70997 /* Allocate our own private page. */
70998+ pte_unmap(page_table);
70999+
71000 if (unlikely(anon_vma_prepare(vma)))
71001 goto oom;
71002 page = alloc_zeroed_user_highpage_movable(vma, address);
71003@@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71004 if (!pte_none(*page_table))
71005 goto release;
71006
71007+#ifdef CONFIG_PAX_SEGMEXEC
71008+ if (pax_find_mirror_vma(vma))
71009+ BUG_ON(!trylock_page(page));
71010+#endif
71011+
71012 inc_mm_counter_fast(mm, MM_ANONPAGES);
71013 page_add_new_anon_rmap(page, vma, address);
71014 setpte:
71015@@ -3147,6 +3331,12 @@ setpte:
71016
71017 /* No need to invalidate - it was non-present before */
71018 update_mmu_cache(vma, address, page_table);
71019+
71020+#ifdef CONFIG_PAX_SEGMEXEC
71021+ if (page)
71022+ pax_mirror_anon_pte(vma, address, page, ptl);
71023+#endif
71024+
71025 unlock:
71026 pte_unmap_unlock(page_table, ptl);
71027 return 0;
71028@@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71029 */
71030 /* Only go through if we didn't race with anybody else... */
71031 if (likely(pte_same(*page_table, orig_pte))) {
71032+
71033+#ifdef CONFIG_PAX_SEGMEXEC
71034+ if (anon && pax_find_mirror_vma(vma))
71035+ BUG_ON(!trylock_page(page));
71036+#endif
71037+
71038 flush_icache_page(vma, page);
71039 entry = mk_pte(page, vma->vm_page_prot);
71040 if (flags & FAULT_FLAG_WRITE)
71041@@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71042
71043 /* no need to invalidate: a not-present page won't be cached */
71044 update_mmu_cache(vma, address, page_table);
71045+
71046+#ifdef CONFIG_PAX_SEGMEXEC
71047+ if (anon)
71048+ pax_mirror_anon_pte(vma, address, page, ptl);
71049+ else
71050+ pax_mirror_file_pte(vma, address, page, ptl);
71051+#endif
71052+
71053 } else {
71054 if (cow_page)
71055 mem_cgroup_uncharge_page(cow_page);
71056@@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
71057 if (flags & FAULT_FLAG_WRITE)
71058 flush_tlb_fix_spurious_fault(vma, address);
71059 }
71060+
71061+#ifdef CONFIG_PAX_SEGMEXEC
71062+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71063+ return 0;
71064+#endif
71065+
71066 unlock:
71067 pte_unmap_unlock(pte, ptl);
71068 return 0;
71069@@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71070 pmd_t *pmd;
71071 pte_t *pte;
71072
71073+#ifdef CONFIG_PAX_SEGMEXEC
71074+ struct vm_area_struct *vma_m;
71075+#endif
71076+
71077 __set_current_state(TASK_RUNNING);
71078
71079 count_vm_event(PGFAULT);
71080@@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71081 if (unlikely(is_vm_hugetlb_page(vma)))
71082 return hugetlb_fault(mm, vma, address, flags);
71083
71084+#ifdef CONFIG_PAX_SEGMEXEC
71085+ vma_m = pax_find_mirror_vma(vma);
71086+ if (vma_m) {
71087+ unsigned long address_m;
71088+ pgd_t *pgd_m;
71089+ pud_t *pud_m;
71090+ pmd_t *pmd_m;
71091+
71092+ if (vma->vm_start > vma_m->vm_start) {
71093+ address_m = address;
71094+ address -= SEGMEXEC_TASK_SIZE;
71095+ vma = vma_m;
71096+ } else
71097+ address_m = address + SEGMEXEC_TASK_SIZE;
71098+
71099+ pgd_m = pgd_offset(mm, address_m);
71100+ pud_m = pud_alloc(mm, pgd_m, address_m);
71101+ if (!pud_m)
71102+ return VM_FAULT_OOM;
71103+ pmd_m = pmd_alloc(mm, pud_m, address_m);
71104+ if (!pmd_m)
71105+ return VM_FAULT_OOM;
71106+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71107+ return VM_FAULT_OOM;
71108+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71109+ }
71110+#endif
71111+
71112 pgd = pgd_offset(mm, address);
71113 pud = pud_alloc(mm, pgd, address);
71114 if (!pud)
71115@@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71116 * run pte_offset_map on the pmd, if an huge pmd could
71117 * materialize from under us from a different thread.
71118 */
71119- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71120+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71121 return VM_FAULT_OOM;
71122 /* if an huge pmd materialized from under us just retry later */
71123 if (unlikely(pmd_trans_huge(*pmd)))
71124@@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71125 spin_unlock(&mm->page_table_lock);
71126 return 0;
71127 }
71128+
71129+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71130+{
71131+ pud_t *new = pud_alloc_one(mm, address);
71132+ if (!new)
71133+ return -ENOMEM;
71134+
71135+ smp_wmb(); /* See comment in __pte_alloc */
71136+
71137+ spin_lock(&mm->page_table_lock);
71138+ if (pgd_present(*pgd)) /* Another has populated it */
71139+ pud_free(mm, new);
71140+ else
71141+ pgd_populate_kernel(mm, pgd, new);
71142+ spin_unlock(&mm->page_table_lock);
71143+ return 0;
71144+}
71145 #endif /* __PAGETABLE_PUD_FOLDED */
71146
71147 #ifndef __PAGETABLE_PMD_FOLDED
71148@@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
71149 spin_unlock(&mm->page_table_lock);
71150 return 0;
71151 }
71152+
71153+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
71154+{
71155+ pmd_t *new = pmd_alloc_one(mm, address);
71156+ if (!new)
71157+ return -ENOMEM;
71158+
71159+ smp_wmb(); /* See comment in __pte_alloc */
71160+
71161+ spin_lock(&mm->page_table_lock);
71162+#ifndef __ARCH_HAS_4LEVEL_HACK
71163+ if (pud_present(*pud)) /* Another has populated it */
71164+ pmd_free(mm, new);
71165+ else
71166+ pud_populate_kernel(mm, pud, new);
71167+#else
71168+ if (pgd_present(*pud)) /* Another has populated it */
71169+ pmd_free(mm, new);
71170+ else
71171+ pgd_populate_kernel(mm, pud, new);
71172+#endif /* __ARCH_HAS_4LEVEL_HACK */
71173+ spin_unlock(&mm->page_table_lock);
71174+ return 0;
71175+}
71176 #endif /* __PAGETABLE_PMD_FOLDED */
71177
71178 int make_pages_present(unsigned long addr, unsigned long end)
71179@@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
71180 gate_vma.vm_start = FIXADDR_USER_START;
71181 gate_vma.vm_end = FIXADDR_USER_END;
71182 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71183- gate_vma.vm_page_prot = __P101;
71184+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71185
71186 return 0;
71187 }
71188diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71189index bf5b485..e44c2cb 100644
71190--- a/mm/mempolicy.c
71191+++ b/mm/mempolicy.c
71192@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71193 unsigned long vmstart;
71194 unsigned long vmend;
71195
71196+#ifdef CONFIG_PAX_SEGMEXEC
71197+ struct vm_area_struct *vma_m;
71198+#endif
71199+
71200 vma = find_vma(mm, start);
71201 if (!vma || vma->vm_start > start)
71202 return -EFAULT;
71203@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71204 if (err)
71205 goto out;
71206 }
71207+
71208+#ifdef CONFIG_PAX_SEGMEXEC
71209+ vma_m = pax_find_mirror_vma(vma);
71210+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
71211+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
71212+ if (err)
71213+ goto out;
71214+ }
71215+#endif
71216+
71217 }
71218
71219 out:
71220@@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71221
71222 if (end < start)
71223 return -EINVAL;
71224+
71225+#ifdef CONFIG_PAX_SEGMEXEC
71226+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71227+ if (end > SEGMEXEC_TASK_SIZE)
71228+ return -EINVAL;
71229+ } else
71230+#endif
71231+
71232+ if (end > TASK_SIZE)
71233+ return -EINVAL;
71234+
71235 if (end == start)
71236 return 0;
71237
71238@@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71239 */
71240 tcred = __task_cred(task);
71241 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71242- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71243- !capable(CAP_SYS_NICE)) {
71244+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71245 rcu_read_unlock();
71246 err = -EPERM;
71247 goto out_put;
71248@@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71249 goto out;
71250 }
71251
71252+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71253+ if (mm != current->mm &&
71254+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71255+ mmput(mm);
71256+ err = -EPERM;
71257+ goto out;
71258+ }
71259+#endif
71260+
71261 err = do_migrate_pages(mm, old, new,
71262 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
71263
71264diff --git a/mm/mlock.c b/mm/mlock.c
71265index ef726e8..13e0901 100644
71266--- a/mm/mlock.c
71267+++ b/mm/mlock.c
71268@@ -13,6 +13,7 @@
71269 #include <linux/pagemap.h>
71270 #include <linux/mempolicy.h>
71271 #include <linux/syscalls.h>
71272+#include <linux/security.h>
71273 #include <linux/sched.h>
71274 #include <linux/export.h>
71275 #include <linux/rmap.h>
71276@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71277 return -EINVAL;
71278 if (end == start)
71279 return 0;
71280+ if (end > TASK_SIZE)
71281+ return -EINVAL;
71282+
71283 vma = find_vma(current->mm, start);
71284 if (!vma || vma->vm_start > start)
71285 return -ENOMEM;
71286@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71287 for (nstart = start ; ; ) {
71288 vm_flags_t newflags;
71289
71290+#ifdef CONFIG_PAX_SEGMEXEC
71291+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71292+ break;
71293+#endif
71294+
71295 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71296
71297 newflags = vma->vm_flags | VM_LOCKED;
71298@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71299 lock_limit >>= PAGE_SHIFT;
71300
71301 /* check against resource limits */
71302+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71303 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71304 error = do_mlock(start, len, 1);
71305 up_write(&current->mm->mmap_sem);
71306@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71307 static int do_mlockall(int flags)
71308 {
71309 struct vm_area_struct * vma, * prev = NULL;
71310- unsigned int def_flags = 0;
71311
71312 if (flags & MCL_FUTURE)
71313- def_flags = VM_LOCKED;
71314- current->mm->def_flags = def_flags;
71315+ current->mm->def_flags |= VM_LOCKED;
71316+ else
71317+ current->mm->def_flags &= ~VM_LOCKED;
71318 if (flags == MCL_FUTURE)
71319 goto out;
71320
71321 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71322 vm_flags_t newflags;
71323
71324+#ifdef CONFIG_PAX_SEGMEXEC
71325+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71326+ break;
71327+#endif
71328+
71329+ BUG_ON(vma->vm_end > TASK_SIZE);
71330 newflags = vma->vm_flags | VM_LOCKED;
71331 if (!(flags & MCL_CURRENT))
71332 newflags &= ~VM_LOCKED;
71333@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71334 lock_limit >>= PAGE_SHIFT;
71335
71336 ret = -ENOMEM;
71337+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71338 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71339 capable(CAP_IPC_LOCK))
71340 ret = do_mlockall(flags);
71341diff --git a/mm/mmap.c b/mm/mmap.c
71342index 848ef52..d2b586c 100644
71343--- a/mm/mmap.c
71344+++ b/mm/mmap.c
71345@@ -46,6 +46,16 @@
71346 #define arch_rebalance_pgtables(addr, len) (addr)
71347 #endif
71348
71349+static inline void verify_mm_writelocked(struct mm_struct *mm)
71350+{
71351+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71352+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71353+ up_read(&mm->mmap_sem);
71354+ BUG();
71355+ }
71356+#endif
71357+}
71358+
71359 static void unmap_region(struct mm_struct *mm,
71360 struct vm_area_struct *vma, struct vm_area_struct *prev,
71361 unsigned long start, unsigned long end);
71362@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71363 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71364 *
71365 */
71366-pgprot_t protection_map[16] = {
71367+pgprot_t protection_map[16] __read_only = {
71368 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71369 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71370 };
71371
71372-pgprot_t vm_get_page_prot(unsigned long vm_flags)
71373+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71374 {
71375- return __pgprot(pgprot_val(protection_map[vm_flags &
71376+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71377 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71378 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71379+
71380+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71381+ if (!(__supported_pte_mask & _PAGE_NX) &&
71382+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71383+ (vm_flags & (VM_READ | VM_WRITE)))
71384+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71385+#endif
71386+
71387+ return prot;
71388 }
71389 EXPORT_SYMBOL(vm_get_page_prot);
71390
71391 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71392 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71393 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71394+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71395 /*
71396 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71397 * other variables. It can be updated by several CPUs frequently.
71398@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71399 struct vm_area_struct *next = vma->vm_next;
71400
71401 might_sleep();
71402+ BUG_ON(vma->vm_mirror);
71403 if (vma->vm_ops && vma->vm_ops->close)
71404 vma->vm_ops->close(vma);
71405 if (vma->vm_file) {
71406@@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71407 * not page aligned -Ram Gupta
71408 */
71409 rlim = rlimit(RLIMIT_DATA);
71410+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71411 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71412 (mm->end_data - mm->start_data) > rlim)
71413 goto out;
71414@@ -690,6 +712,12 @@ static int
71415 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71416 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71417 {
71418+
71419+#ifdef CONFIG_PAX_SEGMEXEC
71420+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71421+ return 0;
71422+#endif
71423+
71424 if (is_mergeable_vma(vma, file, vm_flags) &&
71425 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71426 if (vma->vm_pgoff == vm_pgoff)
71427@@ -709,6 +737,12 @@ static int
71428 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71429 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71430 {
71431+
71432+#ifdef CONFIG_PAX_SEGMEXEC
71433+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71434+ return 0;
71435+#endif
71436+
71437 if (is_mergeable_vma(vma, file, vm_flags) &&
71438 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71439 pgoff_t vm_pglen;
71440@@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71441 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71442 struct vm_area_struct *prev, unsigned long addr,
71443 unsigned long end, unsigned long vm_flags,
71444- struct anon_vma *anon_vma, struct file *file,
71445+ struct anon_vma *anon_vma, struct file *file,
71446 pgoff_t pgoff, struct mempolicy *policy)
71447 {
71448 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71449 struct vm_area_struct *area, *next;
71450 int err;
71451
71452+#ifdef CONFIG_PAX_SEGMEXEC
71453+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71454+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71455+
71456+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71457+#endif
71458+
71459 /*
71460 * We later require that vma->vm_flags == vm_flags,
71461 * so this tests vma->vm_flags & VM_SPECIAL, too.
71462@@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71463 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71464 next = next->vm_next;
71465
71466+#ifdef CONFIG_PAX_SEGMEXEC
71467+ if (prev)
71468+ prev_m = pax_find_mirror_vma(prev);
71469+ if (area)
71470+ area_m = pax_find_mirror_vma(area);
71471+ if (next)
71472+ next_m = pax_find_mirror_vma(next);
71473+#endif
71474+
71475 /*
71476 * Can it merge with the predecessor?
71477 */
71478@@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71479 /* cases 1, 6 */
71480 err = vma_adjust(prev, prev->vm_start,
71481 next->vm_end, prev->vm_pgoff, NULL);
71482- } else /* cases 2, 5, 7 */
71483+
71484+#ifdef CONFIG_PAX_SEGMEXEC
71485+ if (!err && prev_m)
71486+ err = vma_adjust(prev_m, prev_m->vm_start,
71487+ next_m->vm_end, prev_m->vm_pgoff, NULL);
71488+#endif
71489+
71490+ } else { /* cases 2, 5, 7 */
71491 err = vma_adjust(prev, prev->vm_start,
71492 end, prev->vm_pgoff, NULL);
71493+
71494+#ifdef CONFIG_PAX_SEGMEXEC
71495+ if (!err && prev_m)
71496+ err = vma_adjust(prev_m, prev_m->vm_start,
71497+ end_m, prev_m->vm_pgoff, NULL);
71498+#endif
71499+
71500+ }
71501 if (err)
71502 return NULL;
71503 khugepaged_enter_vma_merge(prev);
71504@@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71505 mpol_equal(policy, vma_policy(next)) &&
71506 can_vma_merge_before(next, vm_flags,
71507 anon_vma, file, pgoff+pglen)) {
71508- if (prev && addr < prev->vm_end) /* case 4 */
71509+ if (prev && addr < prev->vm_end) { /* case 4 */
71510 err = vma_adjust(prev, prev->vm_start,
71511 addr, prev->vm_pgoff, NULL);
71512- else /* cases 3, 8 */
71513+
71514+#ifdef CONFIG_PAX_SEGMEXEC
71515+ if (!err && prev_m)
71516+ err = vma_adjust(prev_m, prev_m->vm_start,
71517+ addr_m, prev_m->vm_pgoff, NULL);
71518+#endif
71519+
71520+ } else { /* cases 3, 8 */
71521 err = vma_adjust(area, addr, next->vm_end,
71522 next->vm_pgoff - pglen, NULL);
71523+
71524+#ifdef CONFIG_PAX_SEGMEXEC
71525+ if (!err && area_m)
71526+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
71527+ next_m->vm_pgoff - pglen, NULL);
71528+#endif
71529+
71530+ }
71531 if (err)
71532 return NULL;
71533 khugepaged_enter_vma_merge(area);
71534@@ -922,14 +1002,11 @@ none:
71535 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71536 struct file *file, long pages)
71537 {
71538- const unsigned long stack_flags
71539- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71540-
71541 if (file) {
71542 mm->shared_vm += pages;
71543 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71544 mm->exec_vm += pages;
71545- } else if (flags & stack_flags)
71546+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71547 mm->stack_vm += pages;
71548 if (flags & (VM_RESERVED|VM_IO))
71549 mm->reserved_vm += pages;
71550@@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71551 * (the exception is when the underlying filesystem is noexec
71552 * mounted, in which case we dont add PROT_EXEC.)
71553 */
71554- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71555+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71556 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71557 prot |= PROT_EXEC;
71558
71559@@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71560 /* Obtain the address to map to. we verify (or select) it and ensure
71561 * that it represents a valid section of the address space.
71562 */
71563- addr = get_unmapped_area(file, addr, len, pgoff, flags);
71564+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71565 if (addr & ~PAGE_MASK)
71566 return addr;
71567
71568@@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71569 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71570 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71571
71572+#ifdef CONFIG_PAX_MPROTECT
71573+ if (mm->pax_flags & MF_PAX_MPROTECT) {
71574+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71575+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71576+ gr_log_rwxmmap(file);
71577+
71578+#ifdef CONFIG_PAX_EMUPLT
71579+ vm_flags &= ~VM_EXEC;
71580+#else
71581+ return -EPERM;
71582+#endif
71583+
71584+ }
71585+
71586+ if (!(vm_flags & VM_EXEC))
71587+ vm_flags &= ~VM_MAYEXEC;
71588+#else
71589+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71590+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71591+#endif
71592+ else
71593+ vm_flags &= ~VM_MAYWRITE;
71594+ }
71595+#endif
71596+
71597+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71598+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71599+ vm_flags &= ~VM_PAGEEXEC;
71600+#endif
71601+
71602 if (flags & MAP_LOCKED)
71603 if (!can_do_mlock())
71604 return -EPERM;
71605@@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71606 locked += mm->locked_vm;
71607 lock_limit = rlimit(RLIMIT_MEMLOCK);
71608 lock_limit >>= PAGE_SHIFT;
71609+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71610 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71611 return -EAGAIN;
71612 }
71613@@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71614 if (error)
71615 return error;
71616
71617+ if (!gr_acl_handle_mmap(file, prot))
71618+ return -EACCES;
71619+
71620 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71621 }
71622
71623@@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71624 vm_flags_t vm_flags = vma->vm_flags;
71625
71626 /* If it was private or non-writable, the write bit is already clear */
71627- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71628+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71629 return 0;
71630
71631 /* The backer wishes to know when pages are first written to? */
71632@@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71633 unsigned long charged = 0;
71634 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71635
71636+#ifdef CONFIG_PAX_SEGMEXEC
71637+ struct vm_area_struct *vma_m = NULL;
71638+#endif
71639+
71640+ /*
71641+ * mm->mmap_sem is required to protect against another thread
71642+ * changing the mappings in case we sleep.
71643+ */
71644+ verify_mm_writelocked(mm);
71645+
71646 /* Clear old maps */
71647 error = -ENOMEM;
71648-munmap_back:
71649 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71650 if (vma && vma->vm_start < addr + len) {
71651 if (do_munmap(mm, addr, len))
71652 return -ENOMEM;
71653- goto munmap_back;
71654+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71655+ BUG_ON(vma && vma->vm_start < addr + len);
71656 }
71657
71658 /* Check against address space limit. */
71659@@ -1297,6 +1418,16 @@ munmap_back:
71660 goto unacct_error;
71661 }
71662
71663+#ifdef CONFIG_PAX_SEGMEXEC
71664+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71665+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71666+ if (!vma_m) {
71667+ error = -ENOMEM;
71668+ goto free_vma;
71669+ }
71670+ }
71671+#endif
71672+
71673 vma->vm_mm = mm;
71674 vma->vm_start = addr;
71675 vma->vm_end = addr + len;
71676@@ -1321,6 +1452,19 @@ munmap_back:
71677 error = file->f_op->mmap(file, vma);
71678 if (error)
71679 goto unmap_and_free_vma;
71680+
71681+#ifdef CONFIG_PAX_SEGMEXEC
71682+ if (vma_m && (vm_flags & VM_EXECUTABLE))
71683+ added_exe_file_vma(mm);
71684+#endif
71685+
71686+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71687+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71688+ vma->vm_flags |= VM_PAGEEXEC;
71689+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71690+ }
71691+#endif
71692+
71693 if (vm_flags & VM_EXECUTABLE)
71694 added_exe_file_vma(mm);
71695
71696@@ -1358,6 +1502,11 @@ munmap_back:
71697 vma_link(mm, vma, prev, rb_link, rb_parent);
71698 file = vma->vm_file;
71699
71700+#ifdef CONFIG_PAX_SEGMEXEC
71701+ if (vma_m)
71702+ BUG_ON(pax_mirror_vma(vma_m, vma));
71703+#endif
71704+
71705 /* Once vma denies write, undo our temporary denial count */
71706 if (correct_wcount)
71707 atomic_inc(&inode->i_writecount);
71708@@ -1366,6 +1515,7 @@ out:
71709
71710 mm->total_vm += len >> PAGE_SHIFT;
71711 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71712+ track_exec_limit(mm, addr, addr + len, vm_flags);
71713 if (vm_flags & VM_LOCKED) {
71714 if (!mlock_vma_pages_range(vma, addr, addr + len))
71715 mm->locked_vm += (len >> PAGE_SHIFT);
71716@@ -1383,6 +1533,12 @@ unmap_and_free_vma:
71717 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71718 charged = 0;
71719 free_vma:
71720+
71721+#ifdef CONFIG_PAX_SEGMEXEC
71722+ if (vma_m)
71723+ kmem_cache_free(vm_area_cachep, vma_m);
71724+#endif
71725+
71726 kmem_cache_free(vm_area_cachep, vma);
71727 unacct_error:
71728 if (charged)
71729@@ -1390,6 +1546,44 @@ unacct_error:
71730 return error;
71731 }
71732
71733+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71734+{
71735+ if (!vma) {
71736+#ifdef CONFIG_STACK_GROWSUP
71737+ if (addr > sysctl_heap_stack_gap)
71738+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71739+ else
71740+ vma = find_vma(current->mm, 0);
71741+ if (vma && (vma->vm_flags & VM_GROWSUP))
71742+ return false;
71743+#endif
71744+ return true;
71745+ }
71746+
71747+ if (addr + len > vma->vm_start)
71748+ return false;
71749+
71750+ if (vma->vm_flags & VM_GROWSDOWN)
71751+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71752+#ifdef CONFIG_STACK_GROWSUP
71753+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71754+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71755+#endif
71756+
71757+ return true;
71758+}
71759+
71760+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71761+{
71762+ if (vma->vm_start < len)
71763+ return -ENOMEM;
71764+ if (!(vma->vm_flags & VM_GROWSDOWN))
71765+ return vma->vm_start - len;
71766+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71767+ return vma->vm_start - len - sysctl_heap_stack_gap;
71768+ return -ENOMEM;
71769+}
71770+
71771 /* Get an address range which is currently unmapped.
71772 * For shmat() with addr=0.
71773 *
71774@@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71775 if (flags & MAP_FIXED)
71776 return addr;
71777
71778+#ifdef CONFIG_PAX_RANDMMAP
71779+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71780+#endif
71781+
71782 if (addr) {
71783 addr = PAGE_ALIGN(addr);
71784- vma = find_vma(mm, addr);
71785- if (TASK_SIZE - len >= addr &&
71786- (!vma || addr + len <= vma->vm_start))
71787- return addr;
71788+ if (TASK_SIZE - len >= addr) {
71789+ vma = find_vma(mm, addr);
71790+ if (check_heap_stack_gap(vma, addr, len))
71791+ return addr;
71792+ }
71793 }
71794 if (len > mm->cached_hole_size) {
71795- start_addr = addr = mm->free_area_cache;
71796+ start_addr = addr = mm->free_area_cache;
71797 } else {
71798- start_addr = addr = TASK_UNMAPPED_BASE;
71799- mm->cached_hole_size = 0;
71800+ start_addr = addr = mm->mmap_base;
71801+ mm->cached_hole_size = 0;
71802 }
71803
71804 full_search:
71805@@ -1438,34 +1637,40 @@ full_search:
71806 * Start a new search - just in case we missed
71807 * some holes.
71808 */
71809- if (start_addr != TASK_UNMAPPED_BASE) {
71810- addr = TASK_UNMAPPED_BASE;
71811- start_addr = addr;
71812+ if (start_addr != mm->mmap_base) {
71813+ start_addr = addr = mm->mmap_base;
71814 mm->cached_hole_size = 0;
71815 goto full_search;
71816 }
71817 return -ENOMEM;
71818 }
71819- if (!vma || addr + len <= vma->vm_start) {
71820- /*
71821- * Remember the place where we stopped the search:
71822- */
71823- mm->free_area_cache = addr + len;
71824- return addr;
71825- }
71826+ if (check_heap_stack_gap(vma, addr, len))
71827+ break;
71828 if (addr + mm->cached_hole_size < vma->vm_start)
71829 mm->cached_hole_size = vma->vm_start - addr;
71830 addr = vma->vm_end;
71831 }
71832+
71833+ /*
71834+ * Remember the place where we stopped the search:
71835+ */
71836+ mm->free_area_cache = addr + len;
71837+ return addr;
71838 }
71839 #endif
71840
71841 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71842 {
71843+
71844+#ifdef CONFIG_PAX_SEGMEXEC
71845+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71846+ return;
71847+#endif
71848+
71849 /*
71850 * Is this a new hole at the lowest possible address?
71851 */
71852- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71853+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71854 mm->free_area_cache = addr;
71855 }
71856
71857@@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71858 {
71859 struct vm_area_struct *vma;
71860 struct mm_struct *mm = current->mm;
71861- unsigned long addr = addr0, start_addr;
71862+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71863
71864 /* requested length too big for entire address space */
71865 if (len > TASK_SIZE)
71866@@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71867 if (flags & MAP_FIXED)
71868 return addr;
71869
71870+#ifdef CONFIG_PAX_RANDMMAP
71871+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71872+#endif
71873+
71874 /* requesting a specific address */
71875 if (addr) {
71876 addr = PAGE_ALIGN(addr);
71877- vma = find_vma(mm, addr);
71878- if (TASK_SIZE - len >= addr &&
71879- (!vma || addr + len <= vma->vm_start))
71880- return addr;
71881+ if (TASK_SIZE - len >= addr) {
71882+ vma = find_vma(mm, addr);
71883+ if (check_heap_stack_gap(vma, addr, len))
71884+ return addr;
71885+ }
71886 }
71887
71888 /* check if free_area_cache is useful for us */
71889@@ -1520,7 +1730,7 @@ try_again:
71890 * return with success:
71891 */
71892 vma = find_vma(mm, addr);
71893- if (!vma || addr+len <= vma->vm_start)
71894+ if (check_heap_stack_gap(vma, addr, len))
71895 /* remember the address as a hint for next time */
71896 return (mm->free_area_cache = addr);
71897
71898@@ -1529,8 +1739,8 @@ try_again:
71899 mm->cached_hole_size = vma->vm_start - addr;
71900
71901 /* try just below the current vma->vm_start */
71902- addr = vma->vm_start-len;
71903- } while (len < vma->vm_start);
71904+ addr = skip_heap_stack_gap(vma, len);
71905+ } while (!IS_ERR_VALUE(addr));
71906
71907 fail:
71908 /*
71909@@ -1553,13 +1763,21 @@ fail:
71910 * can happen with large stack limits and large mmap()
71911 * allocations.
71912 */
71913+ mm->mmap_base = TASK_UNMAPPED_BASE;
71914+
71915+#ifdef CONFIG_PAX_RANDMMAP
71916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71917+ mm->mmap_base += mm->delta_mmap;
71918+#endif
71919+
71920+ mm->free_area_cache = mm->mmap_base;
71921 mm->cached_hole_size = ~0UL;
71922- mm->free_area_cache = TASK_UNMAPPED_BASE;
71923 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71924 /*
71925 * Restore the topdown base:
71926 */
71927- mm->free_area_cache = mm->mmap_base;
71928+ mm->mmap_base = base;
71929+ mm->free_area_cache = base;
71930 mm->cached_hole_size = ~0UL;
71931
71932 return addr;
71933@@ -1568,6 +1786,12 @@ fail:
71934
71935 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71936 {
71937+
71938+#ifdef CONFIG_PAX_SEGMEXEC
71939+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71940+ return;
71941+#endif
71942+
71943 /*
71944 * Is this a new hole at the highest possible address?
71945 */
71946@@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71947 mm->free_area_cache = addr;
71948
71949 /* dont allow allocations above current base */
71950- if (mm->free_area_cache > mm->mmap_base)
71951+ if (mm->free_area_cache > mm->mmap_base) {
71952 mm->free_area_cache = mm->mmap_base;
71953+ mm->cached_hole_size = ~0UL;
71954+ }
71955 }
71956
71957 unsigned long
71958@@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71959 return vma;
71960 }
71961
71962+#ifdef CONFIG_PAX_SEGMEXEC
71963+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71964+{
71965+ struct vm_area_struct *vma_m;
71966+
71967+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71968+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71969+ BUG_ON(vma->vm_mirror);
71970+ return NULL;
71971+ }
71972+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71973+ vma_m = vma->vm_mirror;
71974+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71975+ BUG_ON(vma->vm_file != vma_m->vm_file);
71976+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71977+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71978+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71979+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71980+ return vma_m;
71981+}
71982+#endif
71983+
71984 /*
71985 * Verify that the stack growth is acceptable and
71986 * update accounting. This is shared with both the
71987@@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71988 return -ENOMEM;
71989
71990 /* Stack limit test */
71991+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
71992 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71993 return -ENOMEM;
71994
71995@@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71996 locked = mm->locked_vm + grow;
71997 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71998 limit >>= PAGE_SHIFT;
71999+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72000 if (locked > limit && !capable(CAP_IPC_LOCK))
72001 return -ENOMEM;
72002 }
72003@@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72004 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72005 * vma is the last one with address > vma->vm_end. Have to extend vma.
72006 */
72007+#ifndef CONFIG_IA64
72008+static
72009+#endif
72010 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72011 {
72012 int error;
72013+ bool locknext;
72014
72015 if (!(vma->vm_flags & VM_GROWSUP))
72016 return -EFAULT;
72017
72018+ /* Also guard against wrapping around to address 0. */
72019+ if (address < PAGE_ALIGN(address+1))
72020+ address = PAGE_ALIGN(address+1);
72021+ else
72022+ return -ENOMEM;
72023+
72024 /*
72025 * We must make sure the anon_vma is allocated
72026 * so that the anon_vma locking is not a noop.
72027 */
72028 if (unlikely(anon_vma_prepare(vma)))
72029 return -ENOMEM;
72030+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72031+ if (locknext && anon_vma_prepare(vma->vm_next))
72032+ return -ENOMEM;
72033 vma_lock_anon_vma(vma);
72034+ if (locknext)
72035+ vma_lock_anon_vma(vma->vm_next);
72036
72037 /*
72038 * vma->vm_start/vm_end cannot change under us because the caller
72039 * is required to hold the mmap_sem in read mode. We need the
72040- * anon_vma lock to serialize against concurrent expand_stacks.
72041- * Also guard against wrapping around to address 0.
72042+ * anon_vma locks to serialize against concurrent expand_stacks
72043+ * and expand_upwards.
72044 */
72045- if (address < PAGE_ALIGN(address+4))
72046- address = PAGE_ALIGN(address+4);
72047- else {
72048- vma_unlock_anon_vma(vma);
72049- return -ENOMEM;
72050- }
72051 error = 0;
72052
72053 /* Somebody else might have raced and expanded it already */
72054- if (address > vma->vm_end) {
72055+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72056+ error = -ENOMEM;
72057+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72058 unsigned long size, grow;
72059
72060 size = address - vma->vm_start;
72061@@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72062 }
72063 }
72064 }
72065+ if (locknext)
72066+ vma_unlock_anon_vma(vma->vm_next);
72067 vma_unlock_anon_vma(vma);
72068 khugepaged_enter_vma_merge(vma);
72069 return error;
72070@@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
72071 unsigned long address)
72072 {
72073 int error;
72074+ bool lockprev = false;
72075+ struct vm_area_struct *prev;
72076
72077 /*
72078 * We must make sure the anon_vma is allocated
72079@@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
72080 if (error)
72081 return error;
72082
72083+ prev = vma->vm_prev;
72084+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72085+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72086+#endif
72087+ if (lockprev && anon_vma_prepare(prev))
72088+ return -ENOMEM;
72089+ if (lockprev)
72090+ vma_lock_anon_vma(prev);
72091+
72092 vma_lock_anon_vma(vma);
72093
72094 /*
72095@@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
72096 */
72097
72098 /* Somebody else might have raced and expanded it already */
72099- if (address < vma->vm_start) {
72100+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72101+ error = -ENOMEM;
72102+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72103 unsigned long size, grow;
72104
72105+#ifdef CONFIG_PAX_SEGMEXEC
72106+ struct vm_area_struct *vma_m;
72107+
72108+ vma_m = pax_find_mirror_vma(vma);
72109+#endif
72110+
72111 size = vma->vm_end - address;
72112 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72113
72114@@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
72115 if (!error) {
72116 vma->vm_start = address;
72117 vma->vm_pgoff -= grow;
72118+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72119+
72120+#ifdef CONFIG_PAX_SEGMEXEC
72121+ if (vma_m) {
72122+ vma_m->vm_start -= grow << PAGE_SHIFT;
72123+ vma_m->vm_pgoff -= grow;
72124+ }
72125+#endif
72126+
72127 perf_event_mmap(vma);
72128 }
72129 }
72130 }
72131 vma_unlock_anon_vma(vma);
72132+ if (lockprev)
72133+ vma_unlock_anon_vma(prev);
72134 khugepaged_enter_vma_merge(vma);
72135 return error;
72136 }
72137@@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72138 do {
72139 long nrpages = vma_pages(vma);
72140
72141+#ifdef CONFIG_PAX_SEGMEXEC
72142+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72143+ vma = remove_vma(vma);
72144+ continue;
72145+ }
72146+#endif
72147+
72148 mm->total_vm -= nrpages;
72149 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72150 vma = remove_vma(vma);
72151@@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72152 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72153 vma->vm_prev = NULL;
72154 do {
72155+
72156+#ifdef CONFIG_PAX_SEGMEXEC
72157+ if (vma->vm_mirror) {
72158+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72159+ vma->vm_mirror->vm_mirror = NULL;
72160+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
72161+ vma->vm_mirror = NULL;
72162+ }
72163+#endif
72164+
72165 rb_erase(&vma->vm_rb, &mm->mm_rb);
72166 mm->map_count--;
72167 tail_vma = vma;
72168@@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72169 struct vm_area_struct *new;
72170 int err = -ENOMEM;
72171
72172+#ifdef CONFIG_PAX_SEGMEXEC
72173+ struct vm_area_struct *vma_m, *new_m = NULL;
72174+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72175+#endif
72176+
72177 if (is_vm_hugetlb_page(vma) && (addr &
72178 ~(huge_page_mask(hstate_vma(vma)))))
72179 return -EINVAL;
72180
72181+#ifdef CONFIG_PAX_SEGMEXEC
72182+ vma_m = pax_find_mirror_vma(vma);
72183+#endif
72184+
72185 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72186 if (!new)
72187 goto out_err;
72188
72189+#ifdef CONFIG_PAX_SEGMEXEC
72190+ if (vma_m) {
72191+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72192+ if (!new_m) {
72193+ kmem_cache_free(vm_area_cachep, new);
72194+ goto out_err;
72195+ }
72196+ }
72197+#endif
72198+
72199 /* most fields are the same, copy all, and then fixup */
72200 *new = *vma;
72201
72202@@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72203 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72204 }
72205
72206+#ifdef CONFIG_PAX_SEGMEXEC
72207+ if (vma_m) {
72208+ *new_m = *vma_m;
72209+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
72210+ new_m->vm_mirror = new;
72211+ new->vm_mirror = new_m;
72212+
72213+ if (new_below)
72214+ new_m->vm_end = addr_m;
72215+ else {
72216+ new_m->vm_start = addr_m;
72217+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72218+ }
72219+ }
72220+#endif
72221+
72222 pol = mpol_dup(vma_policy(vma));
72223 if (IS_ERR(pol)) {
72224 err = PTR_ERR(pol);
72225@@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72226 else
72227 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72228
72229+#ifdef CONFIG_PAX_SEGMEXEC
72230+ if (!err && vma_m) {
72231+ if (anon_vma_clone(new_m, vma_m))
72232+ goto out_free_mpol;
72233+
72234+ mpol_get(pol);
72235+ vma_set_policy(new_m, pol);
72236+
72237+ if (new_m->vm_file) {
72238+ get_file(new_m->vm_file);
72239+ if (vma_m->vm_flags & VM_EXECUTABLE)
72240+ added_exe_file_vma(mm);
72241+ }
72242+
72243+ if (new_m->vm_ops && new_m->vm_ops->open)
72244+ new_m->vm_ops->open(new_m);
72245+
72246+ if (new_below)
72247+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72248+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72249+ else
72250+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72251+
72252+ if (err) {
72253+ if (new_m->vm_ops && new_m->vm_ops->close)
72254+ new_m->vm_ops->close(new_m);
72255+ if (new_m->vm_file) {
72256+ if (vma_m->vm_flags & VM_EXECUTABLE)
72257+ removed_exe_file_vma(mm);
72258+ fput(new_m->vm_file);
72259+ }
72260+ mpol_put(pol);
72261+ }
72262+ }
72263+#endif
72264+
72265 /* Success. */
72266 if (!err)
72267 return 0;
72268@@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72269 removed_exe_file_vma(mm);
72270 fput(new->vm_file);
72271 }
72272- unlink_anon_vmas(new);
72273 out_free_mpol:
72274 mpol_put(pol);
72275 out_free_vma:
72276+
72277+#ifdef CONFIG_PAX_SEGMEXEC
72278+ if (new_m) {
72279+ unlink_anon_vmas(new_m);
72280+ kmem_cache_free(vm_area_cachep, new_m);
72281+ }
72282+#endif
72283+
72284+ unlink_anon_vmas(new);
72285 kmem_cache_free(vm_area_cachep, new);
72286 out_err:
72287 return err;
72288@@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72289 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72290 unsigned long addr, int new_below)
72291 {
72292+
72293+#ifdef CONFIG_PAX_SEGMEXEC
72294+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72295+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72296+ if (mm->map_count >= sysctl_max_map_count-1)
72297+ return -ENOMEM;
72298+ } else
72299+#endif
72300+
72301 if (mm->map_count >= sysctl_max_map_count)
72302 return -ENOMEM;
72303
72304@@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72305 * work. This now handles partial unmappings.
72306 * Jeremy Fitzhardinge <jeremy@goop.org>
72307 */
72308+#ifdef CONFIG_PAX_SEGMEXEC
72309 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72310 {
72311+ int ret = __do_munmap(mm, start, len);
72312+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72313+ return ret;
72314+
72315+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72316+}
72317+
72318+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72319+#else
72320+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72321+#endif
72322+{
72323 unsigned long end;
72324 struct vm_area_struct *vma, *prev, *last;
72325
72326+ /*
72327+ * mm->mmap_sem is required to protect against another thread
72328+ * changing the mappings in case we sleep.
72329+ */
72330+ verify_mm_writelocked(mm);
72331+
72332 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72333 return -EINVAL;
72334
72335@@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72336 /* Fix up all other VM information */
72337 remove_vma_list(mm, vma);
72338
72339+ track_exec_limit(mm, start, end, 0UL);
72340+
72341 return 0;
72342 }
72343 EXPORT_SYMBOL(do_munmap);
72344@@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
72345 int ret;
72346 struct mm_struct *mm = current->mm;
72347
72348+
72349+#ifdef CONFIG_PAX_SEGMEXEC
72350+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72351+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
72352+ return -EINVAL;
72353+#endif
72354+
72355 down_write(&mm->mmap_sem);
72356 ret = do_munmap(mm, start, len);
72357 up_write(&mm->mmap_sem);
72358@@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72359 return vm_munmap(addr, len);
72360 }
72361
72362-static inline void verify_mm_writelocked(struct mm_struct *mm)
72363-{
72364-#ifdef CONFIG_DEBUG_VM
72365- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72366- WARN_ON(1);
72367- up_read(&mm->mmap_sem);
72368- }
72369-#endif
72370-}
72371-
72372 /*
72373 * this is really a simplified "do_mmap". it only handles
72374 * anonymous maps. eventually we may be able to do some
72375@@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72376 struct rb_node ** rb_link, * rb_parent;
72377 pgoff_t pgoff = addr >> PAGE_SHIFT;
72378 int error;
72379+ unsigned long charged;
72380
72381 len = PAGE_ALIGN(len);
72382 if (!len)
72383@@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72384
72385 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72386
72387+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72388+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72389+ flags &= ~VM_EXEC;
72390+
72391+#ifdef CONFIG_PAX_MPROTECT
72392+ if (mm->pax_flags & MF_PAX_MPROTECT)
72393+ flags &= ~VM_MAYEXEC;
72394+#endif
72395+
72396+ }
72397+#endif
72398+
72399 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72400 if (error & ~PAGE_MASK)
72401 return error;
72402
72403+ charged = len >> PAGE_SHIFT;
72404+
72405 /*
72406 * mlock MCL_FUTURE?
72407 */
72408 if (mm->def_flags & VM_LOCKED) {
72409 unsigned long locked, lock_limit;
72410- locked = len >> PAGE_SHIFT;
72411+ locked = charged;
72412 locked += mm->locked_vm;
72413 lock_limit = rlimit(RLIMIT_MEMLOCK);
72414 lock_limit >>= PAGE_SHIFT;
72415@@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72416 /*
72417 * Clear old maps. this also does some error checking for us
72418 */
72419- munmap_back:
72420 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72421 if (vma && vma->vm_start < addr + len) {
72422 if (do_munmap(mm, addr, len))
72423 return -ENOMEM;
72424- goto munmap_back;
72425- }
72426+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72427+ BUG_ON(vma && vma->vm_start < addr + len);
72428+ }
72429
72430 /* Check against address space limits *after* clearing old maps... */
72431- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72432+ if (!may_expand_vm(mm, charged))
72433 return -ENOMEM;
72434
72435 if (mm->map_count > sysctl_max_map_count)
72436 return -ENOMEM;
72437
72438- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
72439+ if (security_vm_enough_memory_mm(mm, charged))
72440 return -ENOMEM;
72441
72442 /* Can we just expand an old private anonymous mapping? */
72443@@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72444 */
72445 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72446 if (!vma) {
72447- vm_unacct_memory(len >> PAGE_SHIFT);
72448+ vm_unacct_memory(charged);
72449 return -ENOMEM;
72450 }
72451
72452@@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72453 vma_link(mm, vma, prev, rb_link, rb_parent);
72454 out:
72455 perf_event_mmap(vma);
72456- mm->total_vm += len >> PAGE_SHIFT;
72457+ mm->total_vm += charged;
72458 if (flags & VM_LOCKED) {
72459 if (!mlock_vma_pages_range(vma, addr, addr + len))
72460- mm->locked_vm += (len >> PAGE_SHIFT);
72461+ mm->locked_vm += charged;
72462 }
72463+ track_exec_limit(mm, addr, addr + len, flags);
72464 return addr;
72465 }
72466
72467@@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
72468 * Walk the list again, actually closing and freeing it,
72469 * with preemption enabled, without holding any MM locks.
72470 */
72471- while (vma)
72472+ while (vma) {
72473+ vma->vm_mirror = NULL;
72474 vma = remove_vma(vma);
72475+ }
72476
72477 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72478 }
72479@@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72480 struct vm_area_struct * __vma, * prev;
72481 struct rb_node ** rb_link, * rb_parent;
72482
72483+#ifdef CONFIG_PAX_SEGMEXEC
72484+ struct vm_area_struct *vma_m = NULL;
72485+#endif
72486+
72487+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72488+ return -EPERM;
72489+
72490 /*
72491 * The vm_pgoff of a purely anonymous vma should be irrelevant
72492 * until its first write fault, when page's anon_vma and index
72493@@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72494 if ((vma->vm_flags & VM_ACCOUNT) &&
72495 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72496 return -ENOMEM;
72497+
72498+#ifdef CONFIG_PAX_SEGMEXEC
72499+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72500+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72501+ if (!vma_m)
72502+ return -ENOMEM;
72503+ }
72504+#endif
72505+
72506 vma_link(mm, vma, prev, rb_link, rb_parent);
72507+
72508+#ifdef CONFIG_PAX_SEGMEXEC
72509+ if (vma_m)
72510+ BUG_ON(pax_mirror_vma(vma_m, vma));
72511+#endif
72512+
72513 return 0;
72514 }
72515
72516@@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72517 struct mempolicy *pol;
72518 bool faulted_in_anon_vma = true;
72519
72520+ BUG_ON(vma->vm_mirror);
72521+
72522 /*
72523 * If anonymous vma has not yet been faulted, update new pgoff
72524 * to match new location, to increase its chance of merging.
72525@@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72526 return NULL;
72527 }
72528
72529+#ifdef CONFIG_PAX_SEGMEXEC
72530+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72531+{
72532+ struct vm_area_struct *prev_m;
72533+ struct rb_node **rb_link_m, *rb_parent_m;
72534+ struct mempolicy *pol_m;
72535+
72536+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72537+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72538+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72539+ *vma_m = *vma;
72540+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72541+ if (anon_vma_clone(vma_m, vma))
72542+ return -ENOMEM;
72543+ pol_m = vma_policy(vma_m);
72544+ mpol_get(pol_m);
72545+ vma_set_policy(vma_m, pol_m);
72546+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72547+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72548+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72549+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72550+ if (vma_m->vm_file)
72551+ get_file(vma_m->vm_file);
72552+ if (vma_m->vm_ops && vma_m->vm_ops->open)
72553+ vma_m->vm_ops->open(vma_m);
72554+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72555+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72556+ vma_m->vm_mirror = vma;
72557+ vma->vm_mirror = vma_m;
72558+ return 0;
72559+}
72560+#endif
72561+
72562 /*
72563 * Return true if the calling process may expand its vm space by the passed
72564 * number of pages
72565@@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72566
72567 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72568
72569+#ifdef CONFIG_PAX_RANDMMAP
72570+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72571+ cur -= mm->brk_gap;
72572+#endif
72573+
72574+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72575 if (cur + npages > lim)
72576 return 0;
72577 return 1;
72578@@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
72579 vma->vm_start = addr;
72580 vma->vm_end = addr + len;
72581
72582+#ifdef CONFIG_PAX_MPROTECT
72583+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72584+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72585+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72586+ return -EPERM;
72587+ if (!(vm_flags & VM_EXEC))
72588+ vm_flags &= ~VM_MAYEXEC;
72589+#else
72590+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72591+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72592+#endif
72593+ else
72594+ vm_flags &= ~VM_MAYWRITE;
72595+ }
72596+#endif
72597+
72598 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72599 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72600
72601diff --git a/mm/mprotect.c b/mm/mprotect.c
72602index a409926..8b32e6d 100644
72603--- a/mm/mprotect.c
72604+++ b/mm/mprotect.c
72605@@ -23,10 +23,17 @@
72606 #include <linux/mmu_notifier.h>
72607 #include <linux/migrate.h>
72608 #include <linux/perf_event.h>
72609+
72610+#ifdef CONFIG_PAX_MPROTECT
72611+#include <linux/elf.h>
72612+#include <linux/binfmts.h>
72613+#endif
72614+
72615 #include <asm/uaccess.h>
72616 #include <asm/pgtable.h>
72617 #include <asm/cacheflush.h>
72618 #include <asm/tlbflush.h>
72619+#include <asm/mmu_context.h>
72620
72621 #ifndef pgprot_modify
72622 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72623@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
72624 flush_tlb_range(vma, start, end);
72625 }
72626
72627+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72628+/* called while holding the mmap semaphor for writing except stack expansion */
72629+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72630+{
72631+ unsigned long oldlimit, newlimit = 0UL;
72632+
72633+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72634+ return;
72635+
72636+ spin_lock(&mm->page_table_lock);
72637+ oldlimit = mm->context.user_cs_limit;
72638+ if ((prot & VM_EXEC) && oldlimit < end)
72639+ /* USER_CS limit moved up */
72640+ newlimit = end;
72641+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72642+ /* USER_CS limit moved down */
72643+ newlimit = start;
72644+
72645+ if (newlimit) {
72646+ mm->context.user_cs_limit = newlimit;
72647+
72648+#ifdef CONFIG_SMP
72649+ wmb();
72650+ cpus_clear(mm->context.cpu_user_cs_mask);
72651+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72652+#endif
72653+
72654+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72655+ }
72656+ spin_unlock(&mm->page_table_lock);
72657+ if (newlimit == end) {
72658+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
72659+
72660+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
72661+ if (is_vm_hugetlb_page(vma))
72662+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72663+ else
72664+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72665+ }
72666+}
72667+#endif
72668+
72669 int
72670 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72671 unsigned long start, unsigned long end, unsigned long newflags)
72672@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72673 int error;
72674 int dirty_accountable = 0;
72675
72676+#ifdef CONFIG_PAX_SEGMEXEC
72677+ struct vm_area_struct *vma_m = NULL;
72678+ unsigned long start_m, end_m;
72679+
72680+ start_m = start + SEGMEXEC_TASK_SIZE;
72681+ end_m = end + SEGMEXEC_TASK_SIZE;
72682+#endif
72683+
72684 if (newflags == oldflags) {
72685 *pprev = vma;
72686 return 0;
72687 }
72688
72689+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72690+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72691+
72692+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72693+ return -ENOMEM;
72694+
72695+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72696+ return -ENOMEM;
72697+ }
72698+
72699 /*
72700 * If we make a private mapping writable we increase our commit;
72701 * but (without finer accounting) cannot reduce our commit if we
72702@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72703 }
72704 }
72705
72706+#ifdef CONFIG_PAX_SEGMEXEC
72707+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72708+ if (start != vma->vm_start) {
72709+ error = split_vma(mm, vma, start, 1);
72710+ if (error)
72711+ goto fail;
72712+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72713+ *pprev = (*pprev)->vm_next;
72714+ }
72715+
72716+ if (end != vma->vm_end) {
72717+ error = split_vma(mm, vma, end, 0);
72718+ if (error)
72719+ goto fail;
72720+ }
72721+
72722+ if (pax_find_mirror_vma(vma)) {
72723+ error = __do_munmap(mm, start_m, end_m - start_m);
72724+ if (error)
72725+ goto fail;
72726+ } else {
72727+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72728+ if (!vma_m) {
72729+ error = -ENOMEM;
72730+ goto fail;
72731+ }
72732+ vma->vm_flags = newflags;
72733+ error = pax_mirror_vma(vma_m, vma);
72734+ if (error) {
72735+ vma->vm_flags = oldflags;
72736+ goto fail;
72737+ }
72738+ }
72739+ }
72740+#endif
72741+
72742 /*
72743 * First try to merge with previous and/or next vma.
72744 */
72745@@ -204,9 +307,21 @@ success:
72746 * vm_flags and vm_page_prot are protected by the mmap_sem
72747 * held in write mode.
72748 */
72749+
72750+#ifdef CONFIG_PAX_SEGMEXEC
72751+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72752+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72753+#endif
72754+
72755 vma->vm_flags = newflags;
72756+
72757+#ifdef CONFIG_PAX_MPROTECT
72758+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72759+ mm->binfmt->handle_mprotect(vma, newflags);
72760+#endif
72761+
72762 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72763- vm_get_page_prot(newflags));
72764+ vm_get_page_prot(vma->vm_flags));
72765
72766 if (vma_wants_writenotify(vma)) {
72767 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72768@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72769 end = start + len;
72770 if (end <= start)
72771 return -ENOMEM;
72772+
72773+#ifdef CONFIG_PAX_SEGMEXEC
72774+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72775+ if (end > SEGMEXEC_TASK_SIZE)
72776+ return -EINVAL;
72777+ } else
72778+#endif
72779+
72780+ if (end > TASK_SIZE)
72781+ return -EINVAL;
72782+
72783 if (!arch_validate_prot(prot))
72784 return -EINVAL;
72785
72786@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72787 /*
72788 * Does the application expect PROT_READ to imply PROT_EXEC:
72789 */
72790- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72791+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72792 prot |= PROT_EXEC;
72793
72794 vm_flags = calc_vm_prot_bits(prot);
72795@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72796 if (start > vma->vm_start)
72797 prev = vma;
72798
72799+#ifdef CONFIG_PAX_MPROTECT
72800+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72801+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
72802+#endif
72803+
72804 for (nstart = start ; ; ) {
72805 unsigned long newflags;
72806
72807@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72808
72809 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72810 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72811+ if (prot & (PROT_WRITE | PROT_EXEC))
72812+ gr_log_rwxmprotect(vma->vm_file);
72813+
72814+ error = -EACCES;
72815+ goto out;
72816+ }
72817+
72818+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72819 error = -EACCES;
72820 goto out;
72821 }
72822@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72823 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72824 if (error)
72825 goto out;
72826+
72827+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72828+
72829 nstart = tmp;
72830
72831 if (nstart < prev->vm_end)
72832diff --git a/mm/mremap.c b/mm/mremap.c
72833index db8d983..76506cb 100644
72834--- a/mm/mremap.c
72835+++ b/mm/mremap.c
72836@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72837 continue;
72838 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72839 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72840+
72841+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72842+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72843+ pte = pte_exprotect(pte);
72844+#endif
72845+
72846 set_pte_at(mm, new_addr, new_pte, pte);
72847 }
72848
72849@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72850 if (is_vm_hugetlb_page(vma))
72851 goto Einval;
72852
72853+#ifdef CONFIG_PAX_SEGMEXEC
72854+ if (pax_find_mirror_vma(vma))
72855+ goto Einval;
72856+#endif
72857+
72858 /* We can't remap across vm area boundaries */
72859 if (old_len > vma->vm_end - addr)
72860 goto Efault;
72861@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72862 unsigned long ret = -EINVAL;
72863 unsigned long charged = 0;
72864 unsigned long map_flags;
72865+ unsigned long pax_task_size = TASK_SIZE;
72866
72867 if (new_addr & ~PAGE_MASK)
72868 goto out;
72869
72870- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72871+#ifdef CONFIG_PAX_SEGMEXEC
72872+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72873+ pax_task_size = SEGMEXEC_TASK_SIZE;
72874+#endif
72875+
72876+ pax_task_size -= PAGE_SIZE;
72877+
72878+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72879 goto out;
72880
72881 /* Check if the location we're moving into overlaps the
72882 * old location at all, and fail if it does.
72883 */
72884- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72885- goto out;
72886-
72887- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72888+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72889 goto out;
72890
72891 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72892@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72893 struct vm_area_struct *vma;
72894 unsigned long ret = -EINVAL;
72895 unsigned long charged = 0;
72896+ unsigned long pax_task_size = TASK_SIZE;
72897
72898 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72899 goto out;
72900@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72901 if (!new_len)
72902 goto out;
72903
72904+#ifdef CONFIG_PAX_SEGMEXEC
72905+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72906+ pax_task_size = SEGMEXEC_TASK_SIZE;
72907+#endif
72908+
72909+ pax_task_size -= PAGE_SIZE;
72910+
72911+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72912+ old_len > pax_task_size || addr > pax_task_size-old_len)
72913+ goto out;
72914+
72915 if (flags & MREMAP_FIXED) {
72916 if (flags & MREMAP_MAYMOVE)
72917 ret = mremap_to(addr, old_len, new_addr, new_len);
72918@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72919 addr + new_len);
72920 }
72921 ret = addr;
72922+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72923 goto out;
72924 }
72925 }
72926@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72927 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72928 if (ret)
72929 goto out;
72930+
72931+ map_flags = vma->vm_flags;
72932 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72933+ if (!(ret & ~PAGE_MASK)) {
72934+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72935+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72936+ }
72937 }
72938 out:
72939 if (ret & ~PAGE_MASK)
72940diff --git a/mm/nommu.c b/mm/nommu.c
72941index bb8f4f0..40d3e02 100644
72942--- a/mm/nommu.c
72943+++ b/mm/nommu.c
72944@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72945 int sysctl_overcommit_ratio = 50; /* default is 50% */
72946 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72947 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72948-int heap_stack_gap = 0;
72949
72950 atomic_long_t mmap_pages_allocated;
72951
72952@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72953 EXPORT_SYMBOL(find_vma);
72954
72955 /*
72956- * find a VMA
72957- * - we don't extend stack VMAs under NOMMU conditions
72958- */
72959-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72960-{
72961- return find_vma(mm, addr);
72962-}
72963-
72964-/*
72965 * expand a stack to a given address
72966 * - not supported under NOMMU conditions
72967 */
72968@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72969
72970 /* most fields are the same, copy all, and then fixup */
72971 *new = *vma;
72972+ INIT_LIST_HEAD(&new->anon_vma_chain);
72973 *region = *vma->vm_region;
72974 new->vm_region = region;
72975
72976diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72977index 918330f..ae99ae1 100644
72978--- a/mm/page_alloc.c
72979+++ b/mm/page_alloc.c
72980@@ -335,7 +335,7 @@ out:
72981 * This usage means that zero-order pages may not be compound.
72982 */
72983
72984-static void free_compound_page(struct page *page)
72985+void free_compound_page(struct page *page)
72986 {
72987 __free_pages_ok(page, compound_order(page));
72988 }
72989@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72990 int i;
72991 int bad = 0;
72992
72993+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72994+ unsigned long index = 1UL << order;
72995+#endif
72996+
72997 trace_mm_page_free(page, order);
72998 kmemcheck_free_shadow(page, order);
72999
73000@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73001 debug_check_no_obj_freed(page_address(page),
73002 PAGE_SIZE << order);
73003 }
73004+
73005+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73006+ for (; index; --index)
73007+ sanitize_highpage(page + index - 1);
73008+#endif
73009+
73010 arch_free_page(page, order);
73011 kernel_map_pages(page, 1 << order, 0);
73012
73013@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73014 arch_alloc_page(page, order);
73015 kernel_map_pages(page, 1 << order, 1);
73016
73017+#ifndef CONFIG_PAX_MEMORY_SANITIZE
73018 if (gfp_flags & __GFP_ZERO)
73019 prep_zero_page(page, order, gfp_flags);
73020+#endif
73021
73022 if (order && (gfp_flags & __GFP_COMP))
73023 prep_compound_page(page, order);
73024@@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73025 unsigned long pfn;
73026
73027 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73028+#ifdef CONFIG_X86_32
73029+ /* boot failures in VMware 8 on 32bit vanilla since
73030+ this change */
73031+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73032+#else
73033 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73034+#endif
73035 return 1;
73036 }
73037 return 0;
73038diff --git a/mm/percpu.c b/mm/percpu.c
73039index bb4be74..a43ea85 100644
73040--- a/mm/percpu.c
73041+++ b/mm/percpu.c
73042@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73043 static unsigned int pcpu_high_unit_cpu __read_mostly;
73044
73045 /* the address of the first chunk which starts with the kernel static area */
73046-void *pcpu_base_addr __read_mostly;
73047+void *pcpu_base_addr __read_only;
73048 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73049
73050 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73051diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73052index c20ff48..137702a 100644
73053--- a/mm/process_vm_access.c
73054+++ b/mm/process_vm_access.c
73055@@ -13,6 +13,7 @@
73056 #include <linux/uio.h>
73057 #include <linux/sched.h>
73058 #include <linux/highmem.h>
73059+#include <linux/security.h>
73060 #include <linux/ptrace.h>
73061 #include <linux/slab.h>
73062 #include <linux/syscalls.h>
73063@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73064 size_t iov_l_curr_offset = 0;
73065 ssize_t iov_len;
73066
73067+ return -ENOSYS; // PaX: until properly audited
73068+
73069 /*
73070 * Work out how many pages of struct pages we're going to need
73071 * when eventually calling get_user_pages
73072 */
73073 for (i = 0; i < riovcnt; i++) {
73074 iov_len = rvec[i].iov_len;
73075- if (iov_len > 0) {
73076- nr_pages_iov = ((unsigned long)rvec[i].iov_base
73077- + iov_len)
73078- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73079- / PAGE_SIZE + 1;
73080- nr_pages = max(nr_pages, nr_pages_iov);
73081- }
73082+ if (iov_len <= 0)
73083+ continue;
73084+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73085+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73086+ nr_pages = max(nr_pages, nr_pages_iov);
73087 }
73088
73089 if (nr_pages == 0)
73090@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73091 goto free_proc_pages;
73092 }
73093
73094+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73095+ rc = -EPERM;
73096+ goto put_task_struct;
73097+ }
73098+
73099 mm = mm_access(task, PTRACE_MODE_ATTACH);
73100 if (!mm || IS_ERR(mm)) {
73101 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73102diff --git a/mm/rmap.c b/mm/rmap.c
73103index 5b5ad58..0f77903 100644
73104--- a/mm/rmap.c
73105+++ b/mm/rmap.c
73106@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73107 struct anon_vma *anon_vma = vma->anon_vma;
73108 struct anon_vma_chain *avc;
73109
73110+#ifdef CONFIG_PAX_SEGMEXEC
73111+ struct anon_vma_chain *avc_m = NULL;
73112+#endif
73113+
73114 might_sleep();
73115 if (unlikely(!anon_vma)) {
73116 struct mm_struct *mm = vma->vm_mm;
73117@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73118 if (!avc)
73119 goto out_enomem;
73120
73121+#ifdef CONFIG_PAX_SEGMEXEC
73122+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73123+ if (!avc_m)
73124+ goto out_enomem_free_avc;
73125+#endif
73126+
73127 anon_vma = find_mergeable_anon_vma(vma);
73128 allocated = NULL;
73129 if (!anon_vma) {
73130@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73131 /* page_table_lock to protect against threads */
73132 spin_lock(&mm->page_table_lock);
73133 if (likely(!vma->anon_vma)) {
73134+
73135+#ifdef CONFIG_PAX_SEGMEXEC
73136+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73137+
73138+ if (vma_m) {
73139+ BUG_ON(vma_m->anon_vma);
73140+ vma_m->anon_vma = anon_vma;
73141+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
73142+ avc_m = NULL;
73143+ }
73144+#endif
73145+
73146 vma->anon_vma = anon_vma;
73147 anon_vma_chain_link(vma, avc, anon_vma);
73148 allocated = NULL;
73149@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73150
73151 if (unlikely(allocated))
73152 put_anon_vma(allocated);
73153+
73154+#ifdef CONFIG_PAX_SEGMEXEC
73155+ if (unlikely(avc_m))
73156+ anon_vma_chain_free(avc_m);
73157+#endif
73158+
73159 if (unlikely(avc))
73160 anon_vma_chain_free(avc);
73161 }
73162 return 0;
73163
73164 out_enomem_free_avc:
73165+
73166+#ifdef CONFIG_PAX_SEGMEXEC
73167+ if (avc_m)
73168+ anon_vma_chain_free(avc_m);
73169+#endif
73170+
73171 anon_vma_chain_free(avc);
73172 out_enomem:
73173 return -ENOMEM;
73174@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
73175 * Attach the anon_vmas from src to dst.
73176 * Returns 0 on success, -ENOMEM on failure.
73177 */
73178-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73179+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73180 {
73181 struct anon_vma_chain *avc, *pavc;
73182 struct anon_vma *root = NULL;
73183@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73184 * the corresponding VMA in the parent process is attached to.
73185 * Returns 0 on success, non-zero on failure.
73186 */
73187-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73188+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73189 {
73190 struct anon_vma_chain *avc;
73191 struct anon_vma *anon_vma;
73192diff --git a/mm/shmem.c b/mm/shmem.c
73193index 9d65a02..7c877e7 100644
73194--- a/mm/shmem.c
73195+++ b/mm/shmem.c
73196@@ -31,7 +31,7 @@
73197 #include <linux/export.h>
73198 #include <linux/swap.h>
73199
73200-static struct vfsmount *shm_mnt;
73201+struct vfsmount *shm_mnt;
73202
73203 #ifdef CONFIG_SHMEM
73204 /*
73205@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73206 #define BOGO_DIRENT_SIZE 20
73207
73208 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73209-#define SHORT_SYMLINK_LEN 128
73210+#define SHORT_SYMLINK_LEN 64
73211
73212 struct shmem_xattr {
73213 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73214@@ -2236,8 +2236,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73215 int err = -ENOMEM;
73216
73217 /* Round up to L1_CACHE_BYTES to resist false sharing */
73218- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73219- L1_CACHE_BYTES), GFP_KERNEL);
73220+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73221 if (!sbinfo)
73222 return -ENOMEM;
73223
73224diff --git a/mm/slab.c b/mm/slab.c
73225index e901a36..ca479fc 100644
73226--- a/mm/slab.c
73227+++ b/mm/slab.c
73228@@ -153,7 +153,7 @@
73229
73230 /* Legal flag mask for kmem_cache_create(). */
73231 #if DEBUG
73232-# define CREATE_MASK (SLAB_RED_ZONE | \
73233+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73234 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73235 SLAB_CACHE_DMA | \
73236 SLAB_STORE_USER | \
73237@@ -161,7 +161,7 @@
73238 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73239 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73240 #else
73241-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73242+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73243 SLAB_CACHE_DMA | \
73244 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73245 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73246@@ -290,7 +290,7 @@ struct kmem_list3 {
73247 * Need this for bootstrapping a per node allocator.
73248 */
73249 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73250-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73251+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73252 #define CACHE_CACHE 0
73253 #define SIZE_AC MAX_NUMNODES
73254 #define SIZE_L3 (2 * MAX_NUMNODES)
73255@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73256 if ((x)->max_freeable < i) \
73257 (x)->max_freeable = i; \
73258 } while (0)
73259-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73260-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73261-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73262-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73263+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73264+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73265+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73266+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73267 #else
73268 #define STATS_INC_ACTIVE(x) do { } while (0)
73269 #define STATS_DEC_ACTIVE(x) do { } while (0)
73270@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73271 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73272 */
73273 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73274- const struct slab *slab, void *obj)
73275+ const struct slab *slab, const void *obj)
73276 {
73277 u32 offset = (obj - slab->s_mem);
73278 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73279@@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
73280 struct cache_names {
73281 char *name;
73282 char *name_dma;
73283+ char *name_usercopy;
73284 };
73285
73286 static struct cache_names __initdata cache_names[] = {
73287-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73288+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
73289 #include <linux/kmalloc_sizes.h>
73290- {NULL,}
73291+ {NULL}
73292 #undef CACHE
73293 };
73294
73295@@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
73296 if (unlikely(gfpflags & GFP_DMA))
73297 return csizep->cs_dmacachep;
73298 #endif
73299+
73300+#ifdef CONFIG_PAX_USERCOPY_SLABS
73301+ if (unlikely(gfpflags & GFP_USERCOPY))
73302+ return csizep->cs_usercopycachep;
73303+#endif
73304+
73305 return csizep->cs_cachep;
73306 }
73307
73308@@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
73309 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73310 sizes[INDEX_AC].cs_size,
73311 ARCH_KMALLOC_MINALIGN,
73312- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73313+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73314 NULL);
73315
73316 if (INDEX_AC != INDEX_L3) {
73317@@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
73318 kmem_cache_create(names[INDEX_L3].name,
73319 sizes[INDEX_L3].cs_size,
73320 ARCH_KMALLOC_MINALIGN,
73321- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73322+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73323 NULL);
73324 }
73325
73326@@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
73327 sizes->cs_cachep = kmem_cache_create(names->name,
73328 sizes->cs_size,
73329 ARCH_KMALLOC_MINALIGN,
73330- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73331+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73332 NULL);
73333 }
73334 #ifdef CONFIG_ZONE_DMA
73335@@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
73336 SLAB_PANIC,
73337 NULL);
73338 #endif
73339+
73340+#ifdef CONFIG_PAX_USERCOPY_SLABS
73341+ sizes->cs_usercopycachep = kmem_cache_create(
73342+ names->name_usercopy,
73343+ sizes->cs_size,
73344+ ARCH_KMALLOC_MINALIGN,
73345+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73346+ NULL);
73347+#endif
73348+
73349 sizes++;
73350 names++;
73351 }
73352@@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
73353 }
73354 /* cpu stats */
73355 {
73356- unsigned long allochit = atomic_read(&cachep->allochit);
73357- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73358- unsigned long freehit = atomic_read(&cachep->freehit);
73359- unsigned long freemiss = atomic_read(&cachep->freemiss);
73360+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73361+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73362+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73363+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73364
73365 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73366 allochit, allocmiss, freehit, freemiss);
73367@@ -4652,13 +4669,68 @@ static int __init slab_proc_init(void)
73368 {
73369 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73370 #ifdef CONFIG_DEBUG_SLAB_LEAK
73371- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73372+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73373 #endif
73374 return 0;
73375 }
73376 module_init(slab_proc_init);
73377 #endif
73378
73379+bool is_usercopy_object(const void *ptr)
73380+{
73381+ struct page *page;
73382+ struct kmem_cache *cachep;
73383+
73384+ if (ZERO_OR_NULL_PTR(ptr))
73385+ return false;
73386+
73387+ if (!virt_addr_valid(ptr))
73388+ return false;
73389+
73390+ page = virt_to_head_page(ptr);
73391+
73392+ if (!PageSlab(page))
73393+ return false;
73394+
73395+ cachep = page_get_cache(page);
73396+ return cachep->flags & SLAB_USERCOPY;
73397+}
73398+
73399+#ifdef CONFIG_PAX_USERCOPY
73400+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73401+{
73402+ struct page *page;
73403+ struct kmem_cache *cachep;
73404+ struct slab *slabp;
73405+ unsigned int objnr;
73406+ unsigned long offset;
73407+
73408+ if (ZERO_OR_NULL_PTR(ptr))
73409+ return "<null>";
73410+
73411+ if (!virt_addr_valid(ptr))
73412+ return NULL;
73413+
73414+ page = virt_to_head_page(ptr);
73415+
73416+ if (!PageSlab(page))
73417+ return NULL;
73418+
73419+ cachep = page_get_cache(page);
73420+ if (!(cachep->flags & SLAB_USERCOPY))
73421+ return cachep->name;
73422+
73423+ slabp = page_get_slab(page);
73424+ objnr = obj_to_index(cachep, slabp, ptr);
73425+ BUG_ON(objnr >= cachep->num);
73426+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73427+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73428+ return NULL;
73429+
73430+ return cachep->name;
73431+}
73432+#endif
73433+
73434 /**
73435 * ksize - get the actual amount of memory allocated for a given object
73436 * @objp: Pointer to the object
73437diff --git a/mm/slob.c b/mm/slob.c
73438index 8105be4..3c15e57 100644
73439--- a/mm/slob.c
73440+++ b/mm/slob.c
73441@@ -29,7 +29,7 @@
73442 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73443 * alloc_pages() directly, allocating compound pages so the page order
73444 * does not have to be separately tracked, and also stores the exact
73445- * allocation size in page->private so that it can be used to accurately
73446+ * allocation size in slob_page->size so that it can be used to accurately
73447 * provide ksize(). These objects are detected in kfree() because slob_page()
73448 * is false for them.
73449 *
73450@@ -58,6 +58,7 @@
73451 */
73452
73453 #include <linux/kernel.h>
73454+#include <linux/sched.h>
73455 #include <linux/slab.h>
73456 #include <linux/mm.h>
73457 #include <linux/swap.h> /* struct reclaim_state */
73458@@ -102,7 +103,8 @@ struct slob_page {
73459 unsigned long flags; /* mandatory */
73460 atomic_t _count; /* mandatory */
73461 slobidx_t units; /* free units left in page */
73462- unsigned long pad[2];
73463+ unsigned long pad[1];
73464+ unsigned long size; /* size when >=PAGE_SIZE */
73465 slob_t *free; /* first free slob_t in page */
73466 struct list_head list; /* linked list of free pages */
73467 };
73468@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73469 */
73470 static inline int is_slob_page(struct slob_page *sp)
73471 {
73472- return PageSlab((struct page *)sp);
73473+ return PageSlab((struct page *)sp) && !sp->size;
73474 }
73475
73476 static inline void set_slob_page(struct slob_page *sp)
73477@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73478
73479 static inline struct slob_page *slob_page(const void *addr)
73480 {
73481- return (struct slob_page *)virt_to_page(addr);
73482+ return (struct slob_page *)virt_to_head_page(addr);
73483 }
73484
73485 /*
73486@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73487 /*
73488 * Return the size of a slob block.
73489 */
73490-static slobidx_t slob_units(slob_t *s)
73491+static slobidx_t slob_units(const slob_t *s)
73492 {
73493 if (s->units > 0)
73494 return s->units;
73495@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73496 /*
73497 * Return the next free slob block pointer after this one.
73498 */
73499-static slob_t *slob_next(slob_t *s)
73500+static slob_t *slob_next(const slob_t *s)
73501 {
73502 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73503 slobidx_t next;
73504@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73505 /*
73506 * Returns true if s is the last free block in its page.
73507 */
73508-static int slob_last(slob_t *s)
73509+static int slob_last(const slob_t *s)
73510 {
73511 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73512 }
73513@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73514 if (!page)
73515 return NULL;
73516
73517+ set_slob_page(page);
73518 return page_address(page);
73519 }
73520
73521@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73522 if (!b)
73523 return NULL;
73524 sp = slob_page(b);
73525- set_slob_page(sp);
73526
73527 spin_lock_irqsave(&slob_lock, flags);
73528 sp->units = SLOB_UNITS(PAGE_SIZE);
73529 sp->free = b;
73530+ sp->size = 0;
73531 INIT_LIST_HEAD(&sp->list);
73532 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73533 set_slob_page_free(sp, slob_list);
73534@@ -476,10 +479,9 @@ out:
73535 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73536 */
73537
73538-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73539+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73540 {
73541- unsigned int *m;
73542- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73543+ slob_t *m;
73544 void *ret;
73545
73546 gfp &= gfp_allowed_mask;
73547@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73548
73549 if (!m)
73550 return NULL;
73551- *m = size;
73552+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73553+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73554+ m[0].units = size;
73555+ m[1].units = align;
73556 ret = (void *)m + align;
73557
73558 trace_kmalloc_node(_RET_IP_, ret,
73559@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73560 gfp |= __GFP_COMP;
73561 ret = slob_new_pages(gfp, order, node);
73562 if (ret) {
73563- struct page *page;
73564- page = virt_to_page(ret);
73565- page->private = size;
73566+ struct slob_page *sp;
73567+ sp = slob_page(ret);
73568+ sp->size = size;
73569 }
73570
73571 trace_kmalloc_node(_RET_IP_, ret,
73572 size, PAGE_SIZE << order, gfp, node);
73573 }
73574
73575- kmemleak_alloc(ret, size, 1, gfp);
73576+ return ret;
73577+}
73578+
73579+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73580+{
73581+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73582+ void *ret = __kmalloc_node_align(size, gfp, node, align);
73583+
73584+ if (!ZERO_OR_NULL_PTR(ret))
73585+ kmemleak_alloc(ret, size, 1, gfp);
73586 return ret;
73587 }
73588 EXPORT_SYMBOL(__kmalloc_node);
73589@@ -533,13 +547,83 @@ void kfree(const void *block)
73590 sp = slob_page(block);
73591 if (is_slob_page(sp)) {
73592 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73593- unsigned int *m = (unsigned int *)(block - align);
73594- slob_free(m, *m + align);
73595- } else
73596+ slob_t *m = (slob_t *)(block - align);
73597+ slob_free(m, m[0].units + align);
73598+ } else {
73599+ clear_slob_page(sp);
73600+ free_slob_page(sp);
73601+ sp->size = 0;
73602 put_page(&sp->page);
73603+ }
73604 }
73605 EXPORT_SYMBOL(kfree);
73606
73607+bool is_usercopy_object(const void *ptr)
73608+{
73609+ return false;
73610+}
73611+
73612+#ifdef CONFIG_PAX_USERCOPY
73613+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73614+{
73615+ struct slob_page *sp;
73616+ const slob_t *free;
73617+ const void *base;
73618+ unsigned long flags;
73619+
73620+ if (ZERO_OR_NULL_PTR(ptr))
73621+ return "<null>";
73622+
73623+ if (!virt_addr_valid(ptr))
73624+ return NULL;
73625+
73626+ sp = slob_page(ptr);
73627+ if (!PageSlab((struct page *)sp))
73628+ return NULL;
73629+
73630+ if (sp->size) {
73631+ base = page_address(&sp->page);
73632+ if (base <= ptr && n <= sp->size - (ptr - base))
73633+ return NULL;
73634+ return "<slob>";
73635+ }
73636+
73637+ /* some tricky double walking to find the chunk */
73638+ spin_lock_irqsave(&slob_lock, flags);
73639+ base = (void *)((unsigned long)ptr & PAGE_MASK);
73640+ free = sp->free;
73641+
73642+ while (!slob_last(free) && (void *)free <= ptr) {
73643+ base = free + slob_units(free);
73644+ free = slob_next(free);
73645+ }
73646+
73647+ while (base < (void *)free) {
73648+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73649+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
73650+ int offset;
73651+
73652+ if (ptr < base + align)
73653+ break;
73654+
73655+ offset = ptr - base - align;
73656+ if (offset >= m) {
73657+ base += size;
73658+ continue;
73659+ }
73660+
73661+ if (n > m - offset)
73662+ break;
73663+
73664+ spin_unlock_irqrestore(&slob_lock, flags);
73665+ return NULL;
73666+ }
73667+
73668+ spin_unlock_irqrestore(&slob_lock, flags);
73669+ return "<slob>";
73670+}
73671+#endif
73672+
73673 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73674 size_t ksize(const void *block)
73675 {
73676@@ -552,10 +636,10 @@ size_t ksize(const void *block)
73677 sp = slob_page(block);
73678 if (is_slob_page(sp)) {
73679 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73680- unsigned int *m = (unsigned int *)(block - align);
73681- return SLOB_UNITS(*m) * SLOB_UNIT;
73682+ slob_t *m = (slob_t *)(block - align);
73683+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73684 } else
73685- return sp->page.private;
73686+ return sp->size;
73687 }
73688 EXPORT_SYMBOL(ksize);
73689
73690@@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73691 {
73692 struct kmem_cache *c;
73693
73694+#ifdef CONFIG_PAX_USERCOPY_SLABS
73695+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
73696+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73697+#else
73698 c = slob_alloc(sizeof(struct kmem_cache),
73699 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73700+#endif
73701
73702 if (c) {
73703 c->name = name;
73704@@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73705
73706 lockdep_trace_alloc(flags);
73707
73708+#ifdef CONFIG_PAX_USERCOPY_SLABS
73709+ b = __kmalloc_node_align(c->size, flags, node, c->align);
73710+#else
73711 if (c->size < PAGE_SIZE) {
73712 b = slob_alloc(c->size, flags, c->align, node);
73713 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73714 SLOB_UNITS(c->size) * SLOB_UNIT,
73715 flags, node);
73716 } else {
73717+ struct slob_page *sp;
73718+
73719 b = slob_new_pages(flags, get_order(c->size), node);
73720+ sp = slob_page(b);
73721+ sp->size = c->size;
73722 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73723 PAGE_SIZE << get_order(c->size),
73724 flags, node);
73725 }
73726+#endif
73727
73728 if (c->ctor)
73729 c->ctor(b);
73730@@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73731
73732 static void __kmem_cache_free(void *b, int size)
73733 {
73734- if (size < PAGE_SIZE)
73735+ struct slob_page *sp = slob_page(b);
73736+
73737+ if (is_slob_page(sp))
73738 slob_free(b, size);
73739- else
73740+ else {
73741+ clear_slob_page(sp);
73742+ free_slob_page(sp);
73743+ sp->size = 0;
73744 slob_free_pages(b, get_order(size));
73745+ }
73746 }
73747
73748 static void kmem_rcu_free(struct rcu_head *head)
73749@@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73750
73751 void kmem_cache_free(struct kmem_cache *c, void *b)
73752 {
73753+ int size = c->size;
73754+
73755+#ifdef CONFIG_PAX_USERCOPY_SLABS
73756+ if (size + c->align < PAGE_SIZE) {
73757+ size += c->align;
73758+ b -= c->align;
73759+ }
73760+#endif
73761+
73762 kmemleak_free_recursive(b, c->flags);
73763 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73764 struct slob_rcu *slob_rcu;
73765- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73766- slob_rcu->size = c->size;
73767+ slob_rcu = b + (size - sizeof(struct slob_rcu));
73768+ slob_rcu->size = size;
73769 call_rcu(&slob_rcu->head, kmem_rcu_free);
73770 } else {
73771- __kmem_cache_free(b, c->size);
73772+ __kmem_cache_free(b, size);
73773 }
73774
73775+#ifdef CONFIG_PAX_USERCOPY_SLABS
73776+ trace_kfree(_RET_IP_, b);
73777+#else
73778 trace_kmem_cache_free(_RET_IP_, b);
73779+#endif
73780+
73781 }
73782 EXPORT_SYMBOL(kmem_cache_free);
73783
73784diff --git a/mm/slub.c b/mm/slub.c
73785index 71de9b5..a93d4a4 100644
73786--- a/mm/slub.c
73787+++ b/mm/slub.c
73788@@ -209,7 +209,7 @@ struct track {
73789
73790 enum track_item { TRACK_ALLOC, TRACK_FREE };
73791
73792-#ifdef CONFIG_SYSFS
73793+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73794 static int sysfs_slab_add(struct kmem_cache *);
73795 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73796 static void sysfs_slab_remove(struct kmem_cache *);
73797@@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
73798 if (!t->addr)
73799 return;
73800
73801- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73802+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73803 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73804 #ifdef CONFIG_STACKTRACE
73805 {
73806@@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73807
73808 page = virt_to_head_page(x);
73809
73810+ BUG_ON(!PageSlab(page));
73811+
73812 slab_free(s, page, x, _RET_IP_);
73813
73814 trace_kmem_cache_free(_RET_IP_, x);
73815@@ -2636,7 +2638,7 @@ static int slub_min_objects;
73816 * Merge control. If this is set then no merging of slab caches will occur.
73817 * (Could be removed. This was introduced to pacify the merge skeptics.)
73818 */
73819-static int slub_nomerge;
73820+static int slub_nomerge = 1;
73821
73822 /*
73823 * Calculate the order of allocation given an slab object size.
73824@@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73825 else
73826 s->cpu_partial = 30;
73827
73828- s->refcount = 1;
73829+ atomic_set(&s->refcount, 1);
73830 #ifdef CONFIG_NUMA
73831 s->remote_node_defrag_ratio = 1000;
73832 #endif
73833@@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73834 void kmem_cache_destroy(struct kmem_cache *s)
73835 {
73836 down_write(&slub_lock);
73837- s->refcount--;
73838- if (!s->refcount) {
73839+ if (atomic_dec_and_test(&s->refcount)) {
73840 list_del(&s->list);
73841 up_write(&slub_lock);
73842 if (kmem_cache_close(s)) {
73843@@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
73844 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
73845 #endif
73846
73847+#ifdef CONFIG_PAX_USERCOPY_SLABS
73848+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
73849+#endif
73850+
73851 static int __init setup_slub_min_order(char *str)
73852 {
73853 get_option(&str, &slub_min_order);
73854@@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
73855 return kmalloc_dma_caches[index];
73856
73857 #endif
73858+
73859+#ifdef CONFIG_PAX_USERCOPY_SLABS
73860+ if (flags & SLAB_USERCOPY)
73861+ return kmalloc_usercopy_caches[index];
73862+
73863+#endif
73864+
73865 return kmalloc_caches[index];
73866 }
73867
73868@@ -3405,6 +3417,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73869 EXPORT_SYMBOL(__kmalloc_node);
73870 #endif
73871
73872+bool is_usercopy_object(const void *ptr)
73873+{
73874+ struct page *page;
73875+ struct kmem_cache *s;
73876+
73877+ if (ZERO_OR_NULL_PTR(ptr))
73878+ return false;
73879+
73880+ if (!virt_addr_valid(ptr))
73881+ return false;
73882+
73883+ page = virt_to_head_page(ptr);
73884+
73885+ if (!PageSlab(page))
73886+ return false;
73887+
73888+ s = page->slab;
73889+ return s->flags & SLAB_USERCOPY;
73890+}
73891+
73892+#ifdef CONFIG_PAX_USERCOPY
73893+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73894+{
73895+ struct page *page;
73896+ struct kmem_cache *s;
73897+ unsigned long offset;
73898+
73899+ if (ZERO_OR_NULL_PTR(ptr))
73900+ return "<null>";
73901+
73902+ if (!virt_addr_valid(ptr))
73903+ return NULL;
73904+
73905+ page = virt_to_head_page(ptr);
73906+
73907+ if (!PageSlab(page))
73908+ return NULL;
73909+
73910+ s = page->slab;
73911+ if (!(s->flags & SLAB_USERCOPY))
73912+ return s->name;
73913+
73914+ offset = (ptr - page_address(page)) % s->size;
73915+ if (offset <= s->objsize && n <= s->objsize - offset)
73916+ return NULL;
73917+
73918+ return s->name;
73919+}
73920+#endif
73921+
73922 size_t ksize(const void *object)
73923 {
73924 struct page *page;
73925@@ -3679,7 +3741,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73926 int node;
73927
73928 list_add(&s->list, &slab_caches);
73929- s->refcount = -1;
73930+ atomic_set(&s->refcount, -1);
73931
73932 for_each_node_state(node, N_NORMAL_MEMORY) {
73933 struct kmem_cache_node *n = get_node(s, node);
73934@@ -3799,17 +3861,17 @@ void __init kmem_cache_init(void)
73935
73936 /* Caches that are not of the two-to-the-power-of size */
73937 if (KMALLOC_MIN_SIZE <= 32) {
73938- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73939+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73940 caches++;
73941 }
73942
73943 if (KMALLOC_MIN_SIZE <= 64) {
73944- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73945+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73946 caches++;
73947 }
73948
73949 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73950- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73951+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73952 caches++;
73953 }
73954
73955@@ -3851,6 +3913,22 @@ void __init kmem_cache_init(void)
73956 }
73957 }
73958 #endif
73959+
73960+#ifdef CONFIG_PAX_USERCOPY_SLABS
73961+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
73962+ struct kmem_cache *s = kmalloc_caches[i];
73963+
73964+ if (s && s->size) {
73965+ char *name = kasprintf(GFP_NOWAIT,
73966+ "usercopy-kmalloc-%d", s->objsize);
73967+
73968+ BUG_ON(!name);
73969+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
73970+ s->objsize, SLAB_USERCOPY);
73971+ }
73972+ }
73973+#endif
73974+
73975 printk(KERN_INFO
73976 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
73977 " CPUs=%d, Nodes=%d\n",
73978@@ -3877,7 +3955,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73979 /*
73980 * We may have set a slab to be unmergeable during bootstrap.
73981 */
73982- if (s->refcount < 0)
73983+ if (atomic_read(&s->refcount) < 0)
73984 return 1;
73985
73986 return 0;
73987@@ -3936,7 +4014,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73988 down_write(&slub_lock);
73989 s = find_mergeable(size, align, flags, name, ctor);
73990 if (s) {
73991- s->refcount++;
73992+ atomic_inc(&s->refcount);
73993 /*
73994 * Adjust the object sizes so that we clear
73995 * the complete object on kzalloc.
73996@@ -3945,7 +4023,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73997 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73998
73999 if (sysfs_slab_alias(s, name)) {
74000- s->refcount--;
74001+ atomic_dec(&s->refcount);
74002 goto err;
74003 }
74004 up_write(&slub_lock);
74005@@ -4074,7 +4152,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
74006 }
74007 #endif
74008
74009-#ifdef CONFIG_SYSFS
74010+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74011 static int count_inuse(struct page *page)
74012 {
74013 return page->inuse;
74014@@ -4461,12 +4539,12 @@ static void resiliency_test(void)
74015 validate_slab_cache(kmalloc_caches[9]);
74016 }
74017 #else
74018-#ifdef CONFIG_SYSFS
74019+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74020 static void resiliency_test(void) {};
74021 #endif
74022 #endif
74023
74024-#ifdef CONFIG_SYSFS
74025+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74026 enum slab_stat_type {
74027 SL_ALL, /* All slabs */
74028 SL_PARTIAL, /* Only partially allocated slabs */
74029@@ -4709,7 +4787,7 @@ SLAB_ATTR_RO(ctor);
74030
74031 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74032 {
74033- return sprintf(buf, "%d\n", s->refcount - 1);
74034+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74035 }
74036 SLAB_ATTR_RO(aliases);
74037
74038@@ -5280,6 +5358,7 @@ static char *create_unique_id(struct kmem_cache *s)
74039 return name;
74040 }
74041
74042+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74043 static int sysfs_slab_add(struct kmem_cache *s)
74044 {
74045 int err;
74046@@ -5342,6 +5421,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
74047 kobject_del(&s->kobj);
74048 kobject_put(&s->kobj);
74049 }
74050+#endif
74051
74052 /*
74053 * Need to buffer aliases during bootup until sysfs becomes
74054@@ -5355,6 +5435,7 @@ struct saved_alias {
74055
74056 static struct saved_alias *alias_list;
74057
74058+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74059 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74060 {
74061 struct saved_alias *al;
74062@@ -5377,6 +5458,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74063 alias_list = al;
74064 return 0;
74065 }
74066+#endif
74067
74068 static int __init slab_sysfs_init(void)
74069 {
74070diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74071index 1b7e22a..3fcd4f3 100644
74072--- a/mm/sparse-vmemmap.c
74073+++ b/mm/sparse-vmemmap.c
74074@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74075 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74076 if (!p)
74077 return NULL;
74078- pud_populate(&init_mm, pud, p);
74079+ pud_populate_kernel(&init_mm, pud, p);
74080 }
74081 return pud;
74082 }
74083@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74084 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74085 if (!p)
74086 return NULL;
74087- pgd_populate(&init_mm, pgd, p);
74088+ pgd_populate_kernel(&init_mm, pgd, p);
74089 }
74090 return pgd;
74091 }
74092diff --git a/mm/swap.c b/mm/swap.c
74093index 5c13f13..f1cfc13 100644
74094--- a/mm/swap.c
74095+++ b/mm/swap.c
74096@@ -30,6 +30,7 @@
74097 #include <linux/backing-dev.h>
74098 #include <linux/memcontrol.h>
74099 #include <linux/gfp.h>
74100+#include <linux/hugetlb.h>
74101
74102 #include "internal.h"
74103
74104@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
74105
74106 __page_cache_release(page);
74107 dtor = get_compound_page_dtor(page);
74108+ if (!PageHuge(page))
74109+ BUG_ON(dtor != free_compound_page);
74110 (*dtor)(page);
74111 }
74112
74113diff --git a/mm/swapfile.c b/mm/swapfile.c
74114index 38186d9..bfba6d3 100644
74115--- a/mm/swapfile.c
74116+++ b/mm/swapfile.c
74117@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
74118
74119 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74120 /* Activity counter to indicate that a swapon or swapoff has occurred */
74121-static atomic_t proc_poll_event = ATOMIC_INIT(0);
74122+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74123
74124 static inline unsigned char swap_count(unsigned char ent)
74125 {
74126@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74127 }
74128 filp_close(swap_file, NULL);
74129 err = 0;
74130- atomic_inc(&proc_poll_event);
74131+ atomic_inc_unchecked(&proc_poll_event);
74132 wake_up_interruptible(&proc_poll_wait);
74133
74134 out_dput:
74135@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74136
74137 poll_wait(file, &proc_poll_wait, wait);
74138
74139- if (seq->poll_event != atomic_read(&proc_poll_event)) {
74140- seq->poll_event = atomic_read(&proc_poll_event);
74141+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74142+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74143 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74144 }
74145
74146@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74147 return ret;
74148
74149 seq = file->private_data;
74150- seq->poll_event = atomic_read(&proc_poll_event);
74151+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74152 return 0;
74153 }
74154
74155@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74156 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74157
74158 mutex_unlock(&swapon_mutex);
74159- atomic_inc(&proc_poll_event);
74160+ atomic_inc_unchecked(&proc_poll_event);
74161 wake_up_interruptible(&proc_poll_wait);
74162
74163 if (S_ISREG(inode->i_mode))
74164diff --git a/mm/util.c b/mm/util.c
74165index ae962b3..0bba886 100644
74166--- a/mm/util.c
74167+++ b/mm/util.c
74168@@ -284,6 +284,12 @@ done:
74169 void arch_pick_mmap_layout(struct mm_struct *mm)
74170 {
74171 mm->mmap_base = TASK_UNMAPPED_BASE;
74172+
74173+#ifdef CONFIG_PAX_RANDMMAP
74174+ if (mm->pax_flags & MF_PAX_RANDMMAP)
74175+ mm->mmap_base += mm->delta_mmap;
74176+#endif
74177+
74178 mm->get_unmapped_area = arch_get_unmapped_area;
74179 mm->unmap_area = arch_unmap_area;
74180 }
74181diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74182index 1196c77..2e608e8 100644
74183--- a/mm/vmalloc.c
74184+++ b/mm/vmalloc.c
74185@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74186
74187 pte = pte_offset_kernel(pmd, addr);
74188 do {
74189- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74190- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74191+
74192+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74193+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74194+ BUG_ON(!pte_exec(*pte));
74195+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74196+ continue;
74197+ }
74198+#endif
74199+
74200+ {
74201+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74202+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74203+ }
74204 } while (pte++, addr += PAGE_SIZE, addr != end);
74205 }
74206
74207@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74208 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74209 {
74210 pte_t *pte;
74211+ int ret = -ENOMEM;
74212
74213 /*
74214 * nr is a running index into the array which helps higher level
74215@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74216 pte = pte_alloc_kernel(pmd, addr);
74217 if (!pte)
74218 return -ENOMEM;
74219+
74220+ pax_open_kernel();
74221 do {
74222 struct page *page = pages[*nr];
74223
74224- if (WARN_ON(!pte_none(*pte)))
74225- return -EBUSY;
74226- if (WARN_ON(!page))
74227- return -ENOMEM;
74228+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74229+ if (pgprot_val(prot) & _PAGE_NX)
74230+#endif
74231+
74232+ if (WARN_ON(!pte_none(*pte))) {
74233+ ret = -EBUSY;
74234+ goto out;
74235+ }
74236+ if (WARN_ON(!page)) {
74237+ ret = -ENOMEM;
74238+ goto out;
74239+ }
74240 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74241 (*nr)++;
74242 } while (pte++, addr += PAGE_SIZE, addr != end);
74243- return 0;
74244+ ret = 0;
74245+out:
74246+ pax_close_kernel();
74247+ return ret;
74248 }
74249
74250 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74251@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74252 pmd_t *pmd;
74253 unsigned long next;
74254
74255- pmd = pmd_alloc(&init_mm, pud, addr);
74256+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74257 if (!pmd)
74258 return -ENOMEM;
74259 do {
74260@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74261 pud_t *pud;
74262 unsigned long next;
74263
74264- pud = pud_alloc(&init_mm, pgd, addr);
74265+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
74266 if (!pud)
74267 return -ENOMEM;
74268 do {
74269@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74270 * and fall back on vmalloc() if that fails. Others
74271 * just put it in the vmalloc space.
74272 */
74273-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74274+#ifdef CONFIG_MODULES
74275+#ifdef MODULES_VADDR
74276 unsigned long addr = (unsigned long)x;
74277 if (addr >= MODULES_VADDR && addr < MODULES_END)
74278 return 1;
74279 #endif
74280+
74281+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74282+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74283+ return 1;
74284+#endif
74285+
74286+#endif
74287+
74288 return is_vmalloc_addr(x);
74289 }
74290
74291@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74292
74293 if (!pgd_none(*pgd)) {
74294 pud_t *pud = pud_offset(pgd, addr);
74295+#ifdef CONFIG_X86
74296+ if (!pud_large(*pud))
74297+#endif
74298 if (!pud_none(*pud)) {
74299 pmd_t *pmd = pmd_offset(pud, addr);
74300+#ifdef CONFIG_X86
74301+ if (!pmd_large(*pmd))
74302+#endif
74303 if (!pmd_none(*pmd)) {
74304 pte_t *ptep, pte;
74305
74306@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
74307 static struct vmap_area *alloc_vmap_area(unsigned long size,
74308 unsigned long align,
74309 unsigned long vstart, unsigned long vend,
74310+ int node, gfp_t gfp_mask) __size_overflow(1);
74311+static struct vmap_area *alloc_vmap_area(unsigned long size,
74312+ unsigned long align,
74313+ unsigned long vstart, unsigned long vend,
74314 int node, gfp_t gfp_mask)
74315 {
74316 struct vmap_area *va;
74317@@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74318 struct vm_struct *area;
74319
74320 BUG_ON(in_interrupt());
74321+
74322+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74323+ if (flags & VM_KERNEXEC) {
74324+ if (start != VMALLOC_START || end != VMALLOC_END)
74325+ return NULL;
74326+ start = (unsigned long)MODULES_EXEC_VADDR;
74327+ end = (unsigned long)MODULES_EXEC_END;
74328+ }
74329+#endif
74330+
74331 if (flags & VM_IOREMAP) {
74332 int bit = fls(size);
74333
74334@@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
74335 if (count > totalram_pages)
74336 return NULL;
74337
74338+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74339+ if (!(pgprot_val(prot) & _PAGE_NX))
74340+ flags |= VM_KERNEXEC;
74341+#endif
74342+
74343 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74344 __builtin_return_address(0));
74345 if (!area)
74346@@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74347 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74348 goto fail;
74349
74350+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74351+ if (!(pgprot_val(prot) & _PAGE_NX))
74352+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74353+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74354+ else
74355+#endif
74356+
74357 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74358 start, end, node, gfp_mask, caller);
74359 if (!area)
74360@@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
74361 * For tight control over page level allocator and protection flags
74362 * use __vmalloc() instead.
74363 */
74364-
74365 void *vmalloc_exec(unsigned long size)
74366 {
74367- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74368+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74369 -1, __builtin_return_address(0));
74370 }
74371
74372@@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74373 unsigned long uaddr = vma->vm_start;
74374 unsigned long usize = vma->vm_end - vma->vm_start;
74375
74376+ BUG_ON(vma->vm_mirror);
74377+
74378 if ((PAGE_SIZE-1) & (unsigned long)addr)
74379 return -EINVAL;
74380
74381@@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
74382 return NULL;
74383 }
74384
74385- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
74386- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
74387+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
74388+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
74389 if (!vas || !vms)
74390 goto err_free2;
74391
74392diff --git a/mm/vmscan.c b/mm/vmscan.c
74393index 4607cc6..be5bc0a 100644
74394--- a/mm/vmscan.c
74395+++ b/mm/vmscan.c
74396@@ -3013,7 +3013,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
74397 * them before going back to sleep.
74398 */
74399 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
74400- schedule();
74401+
74402+ if (!kthread_should_stop())
74403+ schedule();
74404+
74405 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
74406 } else {
74407 if (remaining)
74408diff --git a/mm/vmstat.c b/mm/vmstat.c
74409index 7db1b9b..e9f6b07 100644
74410--- a/mm/vmstat.c
74411+++ b/mm/vmstat.c
74412@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74413 *
74414 * vm_stat contains the global counters
74415 */
74416-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74417+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74418 EXPORT_SYMBOL(vm_stat);
74419
74420 #ifdef CONFIG_SMP
74421@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74422 v = p->vm_stat_diff[i];
74423 p->vm_stat_diff[i] = 0;
74424 local_irq_restore(flags);
74425- atomic_long_add(v, &zone->vm_stat[i]);
74426+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74427 global_diff[i] += v;
74428 #ifdef CONFIG_NUMA
74429 /* 3 seconds idle till flush */
74430@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74431
74432 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74433 if (global_diff[i])
74434- atomic_long_add(global_diff[i], &vm_stat[i]);
74435+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74436 }
74437
74438 #endif
74439@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74440 start_cpu_timer(cpu);
74441 #endif
74442 #ifdef CONFIG_PROC_FS
74443- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74444- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74445- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74446- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74447+ {
74448+ mode_t gr_mode = S_IRUGO;
74449+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74450+ gr_mode = S_IRUSR;
74451+#endif
74452+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74453+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74454+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74455+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74456+#else
74457+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74458+#endif
74459+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74460+ }
74461 #endif
74462 return 0;
74463 }
74464diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74465index efea35b..9c8dd0b 100644
74466--- a/net/8021q/vlan.c
74467+++ b/net/8021q/vlan.c
74468@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74469 err = -EPERM;
74470 if (!capable(CAP_NET_ADMIN))
74471 break;
74472- if ((args.u.name_type >= 0) &&
74473- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74474+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74475 struct vlan_net *vn;
74476
74477 vn = net_generic(net, vlan_net_id);
74478diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74479index fccae26..e7ece2f 100644
74480--- a/net/9p/trans_fd.c
74481+++ b/net/9p/trans_fd.c
74482@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74483 oldfs = get_fs();
74484 set_fs(get_ds());
74485 /* The cast to a user pointer is valid due to the set_fs() */
74486- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74487+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74488 set_fs(oldfs);
74489
74490 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74491diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74492index 876fbe8..8bbea9f 100644
74493--- a/net/atm/atm_misc.c
74494+++ b/net/atm/atm_misc.c
74495@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74496 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74497 return 1;
74498 atm_return(vcc, truesize);
74499- atomic_inc(&vcc->stats->rx_drop);
74500+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74501 return 0;
74502 }
74503 EXPORT_SYMBOL(atm_charge);
74504@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74505 }
74506 }
74507 atm_return(vcc, guess);
74508- atomic_inc(&vcc->stats->rx_drop);
74509+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74510 return NULL;
74511 }
74512 EXPORT_SYMBOL(atm_alloc_charge);
74513@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74514
74515 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74516 {
74517-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74518+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74519 __SONET_ITEMS
74520 #undef __HANDLE_ITEM
74521 }
74522@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74523
74524 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74525 {
74526-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74527+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74528 __SONET_ITEMS
74529 #undef __HANDLE_ITEM
74530 }
74531diff --git a/net/atm/lec.h b/net/atm/lec.h
74532index dfc0719..47c5322 100644
74533--- a/net/atm/lec.h
74534+++ b/net/atm/lec.h
74535@@ -48,7 +48,7 @@ struct lane2_ops {
74536 const u8 *tlvs, u32 sizeoftlvs);
74537 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74538 const u8 *tlvs, u32 sizeoftlvs);
74539-};
74540+} __no_const;
74541
74542 /*
74543 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74544diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74545index 0919a88..a23d54e 100644
74546--- a/net/atm/mpc.h
74547+++ b/net/atm/mpc.h
74548@@ -33,7 +33,7 @@ struct mpoa_client {
74549 struct mpc_parameters parameters; /* parameters for this client */
74550
74551 const struct net_device_ops *old_ops;
74552- struct net_device_ops new_ops;
74553+ net_device_ops_no_const new_ops;
74554 };
74555
74556
74557diff --git a/net/atm/proc.c b/net/atm/proc.c
74558index 0d020de..011c7bb 100644
74559--- a/net/atm/proc.c
74560+++ b/net/atm/proc.c
74561@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74562 const struct k_atm_aal_stats *stats)
74563 {
74564 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74565- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74566- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74567- atomic_read(&stats->rx_drop));
74568+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74569+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74570+ atomic_read_unchecked(&stats->rx_drop));
74571 }
74572
74573 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74574diff --git a/net/atm/resources.c b/net/atm/resources.c
74575index 23f45ce..c748f1a 100644
74576--- a/net/atm/resources.c
74577+++ b/net/atm/resources.c
74578@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74579 static void copy_aal_stats(struct k_atm_aal_stats *from,
74580 struct atm_aal_stats *to)
74581 {
74582-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74583+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74584 __AAL_STAT_ITEMS
74585 #undef __HANDLE_ITEM
74586 }
74587@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74588 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74589 struct atm_aal_stats *to)
74590 {
74591-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74592+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74593 __AAL_STAT_ITEMS
74594 #undef __HANDLE_ITEM
74595 }
74596diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74597index a6d5d63..1cc6c2b 100644
74598--- a/net/batman-adv/bat_iv_ogm.c
74599+++ b/net/batman-adv/bat_iv_ogm.c
74600@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74601
74602 /* change sequence number to network order */
74603 batman_ogm_packet->seqno =
74604- htonl((uint32_t)atomic_read(&hard_iface->seqno));
74605+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74606
74607 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74608 batman_ogm_packet->tt_crc = htons((uint16_t)
74609@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74610 else
74611 batman_ogm_packet->gw_flags = NO_FLAGS;
74612
74613- atomic_inc(&hard_iface->seqno);
74614+ atomic_inc_unchecked(&hard_iface->seqno);
74615
74616 slide_own_bcast_window(hard_iface);
74617 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74618@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
74619 return;
74620
74621 /* could be changed by schedule_own_packet() */
74622- if_incoming_seqno = atomic_read(&if_incoming->seqno);
74623+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74624
74625 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74626
74627diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74628index 3778977..f6a9450 100644
74629--- a/net/batman-adv/hard-interface.c
74630+++ b/net/batman-adv/hard-interface.c
74631@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74632 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74633 dev_add_pack(&hard_iface->batman_adv_ptype);
74634
74635- atomic_set(&hard_iface->seqno, 1);
74636- atomic_set(&hard_iface->frag_seqno, 1);
74637+ atomic_set_unchecked(&hard_iface->seqno, 1);
74638+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74639 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74640 hard_iface->net_dev->name);
74641
74642diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74643index a5590f4..8d31969 100644
74644--- a/net/batman-adv/soft-interface.c
74645+++ b/net/batman-adv/soft-interface.c
74646@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74647
74648 /* set broadcast sequence number */
74649 bcast_packet->seqno =
74650- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74651+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74652
74653 add_bcast_packet_to_list(bat_priv, skb, 1);
74654
74655@@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
74656 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74657
74658 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74659- atomic_set(&bat_priv->bcast_seqno, 1);
74660+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74661 atomic_set(&bat_priv->ttvn, 0);
74662 atomic_set(&bat_priv->tt_local_changes, 0);
74663 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74664diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74665index 302efb5..1590365 100644
74666--- a/net/batman-adv/types.h
74667+++ b/net/batman-adv/types.h
74668@@ -38,8 +38,8 @@ struct hard_iface {
74669 int16_t if_num;
74670 char if_status;
74671 struct net_device *net_dev;
74672- atomic_t seqno;
74673- atomic_t frag_seqno;
74674+ atomic_unchecked_t seqno;
74675+ atomic_unchecked_t frag_seqno;
74676 unsigned char *packet_buff;
74677 int packet_len;
74678 struct kobject *hardif_obj;
74679@@ -155,7 +155,7 @@ struct bat_priv {
74680 atomic_t orig_interval; /* uint */
74681 atomic_t hop_penalty; /* uint */
74682 atomic_t log_level; /* uint */
74683- atomic_t bcast_seqno;
74684+ atomic_unchecked_t bcast_seqno;
74685 atomic_t bcast_queue_left;
74686 atomic_t batman_queue_left;
74687 atomic_t ttvn; /* translation table version number */
74688diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74689index 676f6a6..3b4e668 100644
74690--- a/net/batman-adv/unicast.c
74691+++ b/net/batman-adv/unicast.c
74692@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74693 frag1->flags = UNI_FRAG_HEAD | large_tail;
74694 frag2->flags = large_tail;
74695
74696- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74697+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74698 frag1->seqno = htons(seqno - 1);
74699 frag2->seqno = htons(seqno);
74700
74701diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74702index 5238b6b..c9798ce 100644
74703--- a/net/bluetooth/hci_conn.c
74704+++ b/net/bluetooth/hci_conn.c
74705@@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74706 memset(&cp, 0, sizeof(cp));
74707
74708 cp.handle = cpu_to_le16(conn->handle);
74709- memcpy(cp.ltk, ltk, sizeof(ltk));
74710+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74711
74712 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74713 }
74714diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74715index 6f9c25b..d19fd66 100644
74716--- a/net/bluetooth/l2cap_core.c
74717+++ b/net/bluetooth/l2cap_core.c
74718@@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74719 break;
74720
74721 case L2CAP_CONF_RFC:
74722- if (olen == sizeof(rfc))
74723- memcpy(&rfc, (void *)val, olen);
74724+ if (olen != sizeof(rfc))
74725+ break;
74726+
74727+ memcpy(&rfc, (void *)val, olen);
74728
74729 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74730 rfc.mode != chan->mode)
74731@@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74732
74733 switch (type) {
74734 case L2CAP_CONF_RFC:
74735- if (olen == sizeof(rfc))
74736- memcpy(&rfc, (void *)val, olen);
74737+ if (olen != sizeof(rfc))
74738+ break;
74739+
74740+ memcpy(&rfc, (void *)val, olen);
74741 goto done;
74742 }
74743 }
74744diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74745index 5fe2ff3..10968b5 100644
74746--- a/net/bridge/netfilter/ebtables.c
74747+++ b/net/bridge/netfilter/ebtables.c
74748@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74749 tmp.valid_hooks = t->table->valid_hooks;
74750 }
74751 mutex_unlock(&ebt_mutex);
74752- if (copy_to_user(user, &tmp, *len) != 0){
74753+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74754 BUGPRINT("c2u Didn't work\n");
74755 ret = -EFAULT;
74756 break;
74757diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
74758index aa6f716..7bf4c21 100644
74759--- a/net/caif/caif_dev.c
74760+++ b/net/caif/caif_dev.c
74761@@ -562,9 +562,9 @@ static int __init caif_device_init(void)
74762
74763 static void __exit caif_device_exit(void)
74764 {
74765- unregister_pernet_subsys(&caif_net_ops);
74766 unregister_netdevice_notifier(&caif_device_notifier);
74767 dev_remove_pack(&caif_packet_type);
74768+ unregister_pernet_subsys(&caif_net_ops);
74769 }
74770
74771 module_init(caif_device_init);
74772diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74773index 5cf5222..6f704ad 100644
74774--- a/net/caif/cfctrl.c
74775+++ b/net/caif/cfctrl.c
74776@@ -9,6 +9,7 @@
74777 #include <linux/stddef.h>
74778 #include <linux/spinlock.h>
74779 #include <linux/slab.h>
74780+#include <linux/sched.h>
74781 #include <net/caif/caif_layer.h>
74782 #include <net/caif/cfpkt.h>
74783 #include <net/caif/cfctrl.h>
74784@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74785 memset(&dev_info, 0, sizeof(dev_info));
74786 dev_info.id = 0xff;
74787 cfsrvl_init(&this->serv, 0, &dev_info, false);
74788- atomic_set(&this->req_seq_no, 1);
74789- atomic_set(&this->rsp_seq_no, 1);
74790+ atomic_set_unchecked(&this->req_seq_no, 1);
74791+ atomic_set_unchecked(&this->rsp_seq_no, 1);
74792 this->serv.layer.receive = cfctrl_recv;
74793 sprintf(this->serv.layer.name, "ctrl");
74794 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74795@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74796 struct cfctrl_request_info *req)
74797 {
74798 spin_lock_bh(&ctrl->info_list_lock);
74799- atomic_inc(&ctrl->req_seq_no);
74800- req->sequence_no = atomic_read(&ctrl->req_seq_no);
74801+ atomic_inc_unchecked(&ctrl->req_seq_no);
74802+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74803 list_add_tail(&req->list, &ctrl->list);
74804 spin_unlock_bh(&ctrl->info_list_lock);
74805 }
74806@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74807 if (p != first)
74808 pr_warn("Requests are not received in order\n");
74809
74810- atomic_set(&ctrl->rsp_seq_no,
74811+ atomic_set_unchecked(&ctrl->rsp_seq_no,
74812 p->sequence_no);
74813 list_del(&p->list);
74814 goto out;
74815diff --git a/net/can/gw.c b/net/can/gw.c
74816index 3d79b12..8de85fa 100644
74817--- a/net/can/gw.c
74818+++ b/net/can/gw.c
74819@@ -96,7 +96,7 @@ struct cf_mod {
74820 struct {
74821 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74822 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74823- } csumfunc;
74824+ } __no_const csumfunc;
74825 };
74826
74827
74828diff --git a/net/compat.c b/net/compat.c
74829index e055708..3f80795 100644
74830--- a/net/compat.c
74831+++ b/net/compat.c
74832@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74833 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74834 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74835 return -EFAULT;
74836- kmsg->msg_name = compat_ptr(tmp1);
74837- kmsg->msg_iov = compat_ptr(tmp2);
74838- kmsg->msg_control = compat_ptr(tmp3);
74839+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74840+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74841+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74842 return 0;
74843 }
74844
74845@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74846
74847 if (kern_msg->msg_namelen) {
74848 if (mode == VERIFY_READ) {
74849- int err = move_addr_to_kernel(kern_msg->msg_name,
74850+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74851 kern_msg->msg_namelen,
74852 kern_address);
74853 if (err < 0)
74854@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74855 kern_msg->msg_name = NULL;
74856
74857 tot_len = iov_from_user_compat_to_kern(kern_iov,
74858- (struct compat_iovec __user *)kern_msg->msg_iov,
74859+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74860 kern_msg->msg_iovlen);
74861 if (tot_len >= 0)
74862 kern_msg->msg_iov = kern_iov;
74863@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74864
74865 #define CMSG_COMPAT_FIRSTHDR(msg) \
74866 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74867- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74868+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74869 (struct compat_cmsghdr __user *)NULL)
74870
74871 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74872 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74873 (ucmlen) <= (unsigned long) \
74874 ((mhdr)->msg_controllen - \
74875- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74876+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74877
74878 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74879 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74880 {
74881 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74882- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74883+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74884 msg->msg_controllen)
74885 return NULL;
74886 return (struct compat_cmsghdr __user *)ptr;
74887@@ -219,7 +219,7 @@ Efault:
74888
74889 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
74890 {
74891- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74892+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74893 struct compat_cmsghdr cmhdr;
74894 int cmlen;
74895
74896@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74897
74898 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74899 {
74900- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74901+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74902 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74903 int fdnum = scm->fp->count;
74904 struct file **fp = scm->fp->fp;
74905@@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74906 return -EFAULT;
74907 old_fs = get_fs();
74908 set_fs(KERNEL_DS);
74909- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74910+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74911 set_fs(old_fs);
74912
74913 return err;
74914@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74915 len = sizeof(ktime);
74916 old_fs = get_fs();
74917 set_fs(KERNEL_DS);
74918- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74919+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74920 set_fs(old_fs);
74921
74922 if (!err) {
74923@@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74924 case MCAST_JOIN_GROUP:
74925 case MCAST_LEAVE_GROUP:
74926 {
74927- struct compat_group_req __user *gr32 = (void *)optval;
74928+ struct compat_group_req __user *gr32 = (void __user *)optval;
74929 struct group_req __user *kgr =
74930 compat_alloc_user_space(sizeof(struct group_req));
74931 u32 interface;
74932@@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74933 case MCAST_BLOCK_SOURCE:
74934 case MCAST_UNBLOCK_SOURCE:
74935 {
74936- struct compat_group_source_req __user *gsr32 = (void *)optval;
74937+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74938 struct group_source_req __user *kgsr = compat_alloc_user_space(
74939 sizeof(struct group_source_req));
74940 u32 interface;
74941@@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74942 }
74943 case MCAST_MSFILTER:
74944 {
74945- struct compat_group_filter __user *gf32 = (void *)optval;
74946+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74947 struct group_filter __user *kgf;
74948 u32 interface, fmode, numsrc;
74949
74950@@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74951 char __user *optval, int __user *optlen,
74952 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74953 {
74954- struct compat_group_filter __user *gf32 = (void *)optval;
74955+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74956 struct group_filter __user *kgf;
74957 int __user *koptlen;
74958 u32 interface, fmode, numsrc;
74959diff --git a/net/core/datagram.c b/net/core/datagram.c
74960index e4fbfd6..6a6ac94 100644
74961--- a/net/core/datagram.c
74962+++ b/net/core/datagram.c
74963@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74964 }
74965
74966 kfree_skb(skb);
74967- atomic_inc(&sk->sk_drops);
74968+ atomic_inc_unchecked(&sk->sk_drops);
74969 sk_mem_reclaim_partial(sk);
74970
74971 return err;
74972diff --git a/net/core/dev.c b/net/core/dev.c
74973index 533c586..f78a55f 100644
74974--- a/net/core/dev.c
74975+++ b/net/core/dev.c
74976@@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74977 if (no_module && capable(CAP_NET_ADMIN))
74978 no_module = request_module("netdev-%s", name);
74979 if (no_module && capable(CAP_SYS_MODULE)) {
74980+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74981+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
74982+#else
74983 if (!request_module("%s", name))
74984 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74985 name);
74986+#endif
74987 }
74988 }
74989 EXPORT_SYMBOL(dev_load);
74990@@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74991 {
74992 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74993 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74994- atomic_long_inc(&dev->rx_dropped);
74995+ atomic_long_inc_unchecked(&dev->rx_dropped);
74996 kfree_skb(skb);
74997 return NET_RX_DROP;
74998 }
74999@@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75000 nf_reset(skb);
75001
75002 if (unlikely(!is_skb_forwardable(dev, skb))) {
75003- atomic_long_inc(&dev->rx_dropped);
75004+ atomic_long_inc_unchecked(&dev->rx_dropped);
75005 kfree_skb(skb);
75006 return NET_RX_DROP;
75007 }
75008@@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75009
75010 struct dev_gso_cb {
75011 void (*destructor)(struct sk_buff *skb);
75012-};
75013+} __no_const;
75014
75015 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75016
75017@@ -2877,7 +2881,7 @@ enqueue:
75018
75019 local_irq_restore(flags);
75020
75021- atomic_long_inc(&skb->dev->rx_dropped);
75022+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75023 kfree_skb(skb);
75024 return NET_RX_DROP;
75025 }
75026@@ -2949,7 +2953,7 @@ int netif_rx_ni(struct sk_buff *skb)
75027 }
75028 EXPORT_SYMBOL(netif_rx_ni);
75029
75030-static void net_tx_action(struct softirq_action *h)
75031+static void net_tx_action(void)
75032 {
75033 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75034
75035@@ -3237,7 +3241,7 @@ ncls:
75036 if (pt_prev) {
75037 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75038 } else {
75039- atomic_long_inc(&skb->dev->rx_dropped);
75040+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75041 kfree_skb(skb);
75042 /* Jamal, now you will not able to escape explaining
75043 * me how you were going to use this. :-)
75044@@ -3797,7 +3801,7 @@ void netif_napi_del(struct napi_struct *napi)
75045 }
75046 EXPORT_SYMBOL(netif_napi_del);
75047
75048-static void net_rx_action(struct softirq_action *h)
75049+static void net_rx_action(void)
75050 {
75051 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75052 unsigned long time_limit = jiffies + 2;
75053@@ -4267,8 +4271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
75054 else
75055 seq_printf(seq, "%04x", ntohs(pt->type));
75056
75057+#ifdef CONFIG_GRKERNSEC_HIDESYM
75058+ seq_printf(seq, " %-8s %p\n",
75059+ pt->dev ? pt->dev->name : "", NULL);
75060+#else
75061 seq_printf(seq, " %-8s %pF\n",
75062 pt->dev ? pt->dev->name : "", pt->func);
75063+#endif
75064 }
75065
75066 return 0;
75067@@ -5818,7 +5827,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
75068 } else {
75069 netdev_stats_to_stats64(storage, &dev->stats);
75070 }
75071- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75072+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75073 return storage;
75074 }
75075 EXPORT_SYMBOL(dev_get_stats);
75076diff --git a/net/core/flow.c b/net/core/flow.c
75077index e318c7e..168b1d0 100644
75078--- a/net/core/flow.c
75079+++ b/net/core/flow.c
75080@@ -61,7 +61,7 @@ struct flow_cache {
75081 struct timer_list rnd_timer;
75082 };
75083
75084-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75085+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75086 EXPORT_SYMBOL(flow_cache_genid);
75087 static struct flow_cache flow_cache_global;
75088 static struct kmem_cache *flow_cachep __read_mostly;
75089@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75090
75091 static int flow_entry_valid(struct flow_cache_entry *fle)
75092 {
75093- if (atomic_read(&flow_cache_genid) != fle->genid)
75094+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75095 return 0;
75096 if (fle->object && !fle->object->ops->check(fle->object))
75097 return 0;
75098@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75099 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75100 fcp->hash_count++;
75101 }
75102- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75103+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75104 flo = fle->object;
75105 if (!flo)
75106 goto ret_object;
75107@@ -280,7 +280,7 @@ nocache:
75108 }
75109 flo = resolver(net, key, family, dir, flo, ctx);
75110 if (fle) {
75111- fle->genid = atomic_read(&flow_cache_genid);
75112+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
75113 if (!IS_ERR(flo))
75114 fle->object = flo;
75115 else
75116diff --git a/net/core/iovec.c b/net/core/iovec.c
75117index 7e7aeb0..2a998cb 100644
75118--- a/net/core/iovec.c
75119+++ b/net/core/iovec.c
75120@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75121 if (m->msg_namelen) {
75122 if (mode == VERIFY_READ) {
75123 void __user *namep;
75124- namep = (void __user __force *) m->msg_name;
75125+ namep = (void __force_user *) m->msg_name;
75126 err = move_addr_to_kernel(namep, m->msg_namelen,
75127 address);
75128 if (err < 0)
75129@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75130 }
75131
75132 size = m->msg_iovlen * sizeof(struct iovec);
75133- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75134+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75135 return -EFAULT;
75136
75137 m->msg_iov = iov;
75138diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75139index 90430b7..0032ec0 100644
75140--- a/net/core/rtnetlink.c
75141+++ b/net/core/rtnetlink.c
75142@@ -56,7 +56,7 @@ struct rtnl_link {
75143 rtnl_doit_func doit;
75144 rtnl_dumpit_func dumpit;
75145 rtnl_calcit_func calcit;
75146-};
75147+} __no_const;
75148
75149 static DEFINE_MUTEX(rtnl_mutex);
75150
75151diff --git a/net/core/scm.c b/net/core/scm.c
75152index 611c5ef..88f6d6d 100644
75153--- a/net/core/scm.c
75154+++ b/net/core/scm.c
75155@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
75156 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75157 {
75158 struct cmsghdr __user *cm
75159- = (__force struct cmsghdr __user *)msg->msg_control;
75160+ = (struct cmsghdr __force_user *)msg->msg_control;
75161 struct cmsghdr cmhdr;
75162 int cmlen = CMSG_LEN(len);
75163 int err;
75164@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75165 err = -EFAULT;
75166 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75167 goto out;
75168- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75169+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75170 goto out;
75171 cmlen = CMSG_SPACE(len);
75172 if (msg->msg_controllen < cmlen)
75173@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
75174 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75175 {
75176 struct cmsghdr __user *cm
75177- = (__force struct cmsghdr __user*)msg->msg_control;
75178+ = (struct cmsghdr __force_user *)msg->msg_control;
75179
75180 int fdmax = 0;
75181 int fdnum = scm->fp->count;
75182@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75183 if (fdnum < fdmax)
75184 fdmax = fdnum;
75185
75186- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75187+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75188 i++, cmfptr++)
75189 {
75190 int new_fd;
75191diff --git a/net/core/sock.c b/net/core/sock.c
75192index 0f8402e..f0b6338 100644
75193--- a/net/core/sock.c
75194+++ b/net/core/sock.c
75195@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75196 struct sk_buff_head *list = &sk->sk_receive_queue;
75197
75198 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75199- atomic_inc(&sk->sk_drops);
75200+ atomic_inc_unchecked(&sk->sk_drops);
75201 trace_sock_rcvqueue_full(sk, skb);
75202 return -ENOMEM;
75203 }
75204@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75205 return err;
75206
75207 if (!sk_rmem_schedule(sk, skb->truesize)) {
75208- atomic_inc(&sk->sk_drops);
75209+ atomic_inc_unchecked(&sk->sk_drops);
75210 return -ENOBUFS;
75211 }
75212
75213@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75214 skb_dst_force(skb);
75215
75216 spin_lock_irqsave(&list->lock, flags);
75217- skb->dropcount = atomic_read(&sk->sk_drops);
75218+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75219 __skb_queue_tail(list, skb);
75220 spin_unlock_irqrestore(&list->lock, flags);
75221
75222@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75223 skb->dev = NULL;
75224
75225 if (sk_rcvqueues_full(sk, skb)) {
75226- atomic_inc(&sk->sk_drops);
75227+ atomic_inc_unchecked(&sk->sk_drops);
75228 goto discard_and_relse;
75229 }
75230 if (nested)
75231@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75232 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75233 } else if (sk_add_backlog(sk, skb)) {
75234 bh_unlock_sock(sk);
75235- atomic_inc(&sk->sk_drops);
75236+ atomic_inc_unchecked(&sk->sk_drops);
75237 goto discard_and_relse;
75238 }
75239
75240@@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75241 if (len > sizeof(peercred))
75242 len = sizeof(peercred);
75243 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75244- if (copy_to_user(optval, &peercred, len))
75245+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75246 return -EFAULT;
75247 goto lenout;
75248 }
75249@@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75250 return -ENOTCONN;
75251 if (lv < len)
75252 return -EINVAL;
75253- if (copy_to_user(optval, address, len))
75254+ if (len > sizeof(address) || copy_to_user(optval, address, len))
75255 return -EFAULT;
75256 goto lenout;
75257 }
75258@@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75259
75260 if (len > lv)
75261 len = lv;
75262- if (copy_to_user(optval, &v, len))
75263+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
75264 return -EFAULT;
75265 lenout:
75266 if (put_user(len, optlen))
75267@@ -2131,7 +2131,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75268 */
75269 smp_wmb();
75270 atomic_set(&sk->sk_refcnt, 1);
75271- atomic_set(&sk->sk_drops, 0);
75272+ atomic_set_unchecked(&sk->sk_drops, 0);
75273 }
75274 EXPORT_SYMBOL(sock_init_data);
75275
75276diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75277index b9868e1..849f809 100644
75278--- a/net/core/sock_diag.c
75279+++ b/net/core/sock_diag.c
75280@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75281
75282 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75283 {
75284+#ifndef CONFIG_GRKERNSEC_HIDESYM
75285 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75286 cookie[1] != INET_DIAG_NOCOOKIE) &&
75287 ((u32)(unsigned long)sk != cookie[0] ||
75288 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75289 return -ESTALE;
75290 else
75291+#endif
75292 return 0;
75293 }
75294 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75295
75296 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75297 {
75298+#ifdef CONFIG_GRKERNSEC_HIDESYM
75299+ cookie[0] = 0;
75300+ cookie[1] = 0;
75301+#else
75302 cookie[0] = (u32)(unsigned long)sk;
75303 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75304+#endif
75305 }
75306 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75307
75308diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75309index 02e75d1..9a57a7c 100644
75310--- a/net/decnet/sysctl_net_decnet.c
75311+++ b/net/decnet/sysctl_net_decnet.c
75312@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75313
75314 if (len > *lenp) len = *lenp;
75315
75316- if (copy_to_user(buffer, addr, len))
75317+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
75318 return -EFAULT;
75319
75320 *lenp = len;
75321@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75322
75323 if (len > *lenp) len = *lenp;
75324
75325- if (copy_to_user(buffer, devname, len))
75326+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
75327 return -EFAULT;
75328
75329 *lenp = len;
75330diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75331index 39a2d29..f39c0fe 100644
75332--- a/net/econet/Kconfig
75333+++ b/net/econet/Kconfig
75334@@ -4,7 +4,7 @@
75335
75336 config ECONET
75337 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75338- depends on EXPERIMENTAL && INET
75339+ depends on EXPERIMENTAL && INET && BROKEN
75340 ---help---
75341 Econet is a fairly old and slow networking protocol mainly used by
75342 Acorn computers to access file and print servers. It uses native
75343diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
75344index c48adc5..667c1d4 100644
75345--- a/net/ipv4/cipso_ipv4.c
75346+++ b/net/ipv4/cipso_ipv4.c
75347@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
75348 case CIPSO_V4_TAG_LOCAL:
75349 /* This is a non-standard tag that we only allow for
75350 * local connections, so if the incoming interface is
75351- * not the loopback device drop the packet. */
75352- if (!(skb->dev->flags & IFF_LOOPBACK)) {
75353+ * not the loopback device drop the packet. Further,
75354+ * there is no legitimate reason for setting this from
75355+ * userspace so reject it if skb is NULL. */
75356+ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
75357 err_offset = opt_iter;
75358 goto validate_return_locked;
75359 }
75360diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75361index cbe3a68..a879b75 100644
75362--- a/net/ipv4/fib_frontend.c
75363+++ b/net/ipv4/fib_frontend.c
75364@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75365 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75366 fib_sync_up(dev);
75367 #endif
75368- atomic_inc(&net->ipv4.dev_addr_genid);
75369+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75370 rt_cache_flush(dev_net(dev), -1);
75371 break;
75372 case NETDEV_DOWN:
75373 fib_del_ifaddr(ifa, NULL);
75374- atomic_inc(&net->ipv4.dev_addr_genid);
75375+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75376 if (ifa->ifa_dev->ifa_list == NULL) {
75377 /* Last address was deleted from this interface.
75378 * Disable IP.
75379@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75380 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75381 fib_sync_up(dev);
75382 #endif
75383- atomic_inc(&net->ipv4.dev_addr_genid);
75384+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75385 rt_cache_flush(dev_net(dev), -1);
75386 break;
75387 case NETDEV_DOWN:
75388diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75389index 8861f91..ab1e3c1 100644
75390--- a/net/ipv4/fib_semantics.c
75391+++ b/net/ipv4/fib_semantics.c
75392@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75393 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75394 nh->nh_gw,
75395 nh->nh_parent->fib_scope);
75396- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75397+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75398
75399 return nh->nh_saddr;
75400 }
75401diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75402index 984ec65..97ac518 100644
75403--- a/net/ipv4/inet_hashtables.c
75404+++ b/net/ipv4/inet_hashtables.c
75405@@ -18,12 +18,15 @@
75406 #include <linux/sched.h>
75407 #include <linux/slab.h>
75408 #include <linux/wait.h>
75409+#include <linux/security.h>
75410
75411 #include <net/inet_connection_sock.h>
75412 #include <net/inet_hashtables.h>
75413 #include <net/secure_seq.h>
75414 #include <net/ip.h>
75415
75416+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75417+
75418 /*
75419 * Allocate and initialize a new local port bind bucket.
75420 * The bindhash mutex for snum's hash chain must be held here.
75421@@ -530,6 +533,8 @@ ok:
75422 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75423 spin_unlock(&head->lock);
75424
75425+ gr_update_task_in_ip_table(current, inet_sk(sk));
75426+
75427 if (tw) {
75428 inet_twsk_deschedule(tw, death_row);
75429 while (twrefcnt) {
75430diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75431index dfba343..c827d50 100644
75432--- a/net/ipv4/inetpeer.c
75433+++ b/net/ipv4/inetpeer.c
75434@@ -487,8 +487,8 @@ relookup:
75435 if (p) {
75436 p->daddr = *daddr;
75437 atomic_set(&p->refcnt, 1);
75438- atomic_set(&p->rid, 0);
75439- atomic_set(&p->ip_id_count,
75440+ atomic_set_unchecked(&p->rid, 0);
75441+ atomic_set_unchecked(&p->ip_id_count,
75442 (daddr->family == AF_INET) ?
75443 secure_ip_id(daddr->addr.a4) :
75444 secure_ipv6_id(daddr->addr.a6));
75445diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75446index 3727e23..517f5df 100644
75447--- a/net/ipv4/ip_fragment.c
75448+++ b/net/ipv4/ip_fragment.c
75449@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75450 return 0;
75451
75452 start = qp->rid;
75453- end = atomic_inc_return(&peer->rid);
75454+ end = atomic_inc_return_unchecked(&peer->rid);
75455 qp->rid = end;
75456
75457 rc = qp->q.fragments && (end - start) > max;
75458diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75459index 2fd0fba..83fac99 100644
75460--- a/net/ipv4/ip_sockglue.c
75461+++ b/net/ipv4/ip_sockglue.c
75462@@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75463 len = min_t(unsigned int, len, opt->optlen);
75464 if (put_user(len, optlen))
75465 return -EFAULT;
75466- if (copy_to_user(optval, opt->__data, len))
75467+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75468+ copy_to_user(optval, opt->__data, len))
75469 return -EFAULT;
75470 return 0;
75471 }
75472@@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75473 if (sk->sk_type != SOCK_STREAM)
75474 return -ENOPROTOOPT;
75475
75476- msg.msg_control = optval;
75477+ msg.msg_control = (void __force_kernel *)optval;
75478 msg.msg_controllen = len;
75479 msg.msg_flags = flags;
75480
75481diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75482index 92ac7e7..13f93d9 100644
75483--- a/net/ipv4/ipconfig.c
75484+++ b/net/ipv4/ipconfig.c
75485@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75486
75487 mm_segment_t oldfs = get_fs();
75488 set_fs(get_ds());
75489- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75490+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75491 set_fs(oldfs);
75492 return res;
75493 }
75494@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75495
75496 mm_segment_t oldfs = get_fs();
75497 set_fs(get_ds());
75498- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75499+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75500 set_fs(oldfs);
75501 return res;
75502 }
75503@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75504
75505 mm_segment_t oldfs = get_fs();
75506 set_fs(get_ds());
75507- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75508+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75509 set_fs(oldfs);
75510 return res;
75511 }
75512diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75513index 50009c7..5996a9f 100644
75514--- a/net/ipv4/ping.c
75515+++ b/net/ipv4/ping.c
75516@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75517 sk_rmem_alloc_get(sp),
75518 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75519 atomic_read(&sp->sk_refcnt), sp,
75520- atomic_read(&sp->sk_drops), len);
75521+ atomic_read_unchecked(&sp->sk_drops), len);
75522 }
75523
75524 static int ping_seq_show(struct seq_file *seq, void *v)
75525diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75526index bbd604c..4d5469c 100644
75527--- a/net/ipv4/raw.c
75528+++ b/net/ipv4/raw.c
75529@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75530 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75531 {
75532 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75533- atomic_inc(&sk->sk_drops);
75534+ atomic_inc_unchecked(&sk->sk_drops);
75535 kfree_skb(skb);
75536 return NET_RX_DROP;
75537 }
75538@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
75539
75540 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75541 {
75542+ struct icmp_filter filter;
75543+
75544 if (optlen > sizeof(struct icmp_filter))
75545 optlen = sizeof(struct icmp_filter);
75546- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75547+ if (copy_from_user(&filter, optval, optlen))
75548 return -EFAULT;
75549+ raw_sk(sk)->filter = filter;
75550 return 0;
75551 }
75552
75553 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75554 {
75555 int len, ret = -EFAULT;
75556+ struct icmp_filter filter;
75557
75558 if (get_user(len, optlen))
75559 goto out;
75560@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75561 if (len > sizeof(struct icmp_filter))
75562 len = sizeof(struct icmp_filter);
75563 ret = -EFAULT;
75564- if (put_user(len, optlen) ||
75565- copy_to_user(optval, &raw_sk(sk)->filter, len))
75566+ filter = raw_sk(sk)->filter;
75567+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75568 goto out;
75569 ret = 0;
75570 out: return ret;
75571@@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75572 sk_wmem_alloc_get(sp),
75573 sk_rmem_alloc_get(sp),
75574 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75575- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75576+ atomic_read(&sp->sk_refcnt),
75577+#ifdef CONFIG_GRKERNSEC_HIDESYM
75578+ NULL,
75579+#else
75580+ sp,
75581+#endif
75582+ atomic_read_unchecked(&sp->sk_drops));
75583 }
75584
75585 static int raw_seq_show(struct seq_file *seq, void *v)
75586diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75587index 167ea10..4b15883 100644
75588--- a/net/ipv4/route.c
75589+++ b/net/ipv4/route.c
75590@@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75591
75592 static inline int rt_genid(struct net *net)
75593 {
75594- return atomic_read(&net->ipv4.rt_genid);
75595+ return atomic_read_unchecked(&net->ipv4.rt_genid);
75596 }
75597
75598 #ifdef CONFIG_PROC_FS
75599@@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
75600 unsigned char shuffle;
75601
75602 get_random_bytes(&shuffle, sizeof(shuffle));
75603- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75604+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75605 inetpeer_invalidate_tree(AF_INET);
75606 }
75607
75608@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
75609 error = rt->dst.error;
75610 if (peer) {
75611 inet_peer_refcheck(rt->peer);
75612- id = atomic_read(&peer->ip_id_count) & 0xffff;
75613+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75614 if (peer->tcp_ts_stamp) {
75615 ts = peer->tcp_ts;
75616 tsage = get_seconds() - peer->tcp_ts_stamp;
75617diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75618index 0cb86ce..8e7fda8 100644
75619--- a/net/ipv4/tcp_ipv4.c
75620+++ b/net/ipv4/tcp_ipv4.c
75621@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
75622 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75623
75624
75625+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75626+extern int grsec_enable_blackhole;
75627+#endif
75628+
75629 #ifdef CONFIG_TCP_MD5SIG
75630 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
75631 __be32 daddr, __be32 saddr, const struct tcphdr *th);
75632@@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75633 return 0;
75634
75635 reset:
75636+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75637+ if (!grsec_enable_blackhole)
75638+#endif
75639 tcp_v4_send_reset(rsk, skb);
75640 discard:
75641 kfree_skb(skb);
75642@@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75643 TCP_SKB_CB(skb)->sacked = 0;
75644
75645 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75646- if (!sk)
75647+ if (!sk) {
75648+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75649+ ret = 1;
75650+#endif
75651 goto no_tcp_socket;
75652-
75653+ }
75654 process:
75655- if (sk->sk_state == TCP_TIME_WAIT)
75656+ if (sk->sk_state == TCP_TIME_WAIT) {
75657+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75658+ ret = 2;
75659+#endif
75660 goto do_time_wait;
75661+ }
75662
75663 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75664 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75665@@ -1758,6 +1772,10 @@ no_tcp_socket:
75666 bad_packet:
75667 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75668 } else {
75669+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75670+ if (!grsec_enable_blackhole || (ret == 1 &&
75671+ (skb->dev->flags & IFF_LOOPBACK)))
75672+#endif
75673 tcp_v4_send_reset(NULL, skb);
75674 }
75675
75676@@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75677 0, /* non standard timer */
75678 0, /* open_requests have no inode */
75679 atomic_read(&sk->sk_refcnt),
75680+#ifdef CONFIG_GRKERNSEC_HIDESYM
75681+ NULL,
75682+#else
75683 req,
75684+#endif
75685 len);
75686 }
75687
75688@@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75689 sock_i_uid(sk),
75690 icsk->icsk_probes_out,
75691 sock_i_ino(sk),
75692- atomic_read(&sk->sk_refcnt), sk,
75693+ atomic_read(&sk->sk_refcnt),
75694+#ifdef CONFIG_GRKERNSEC_HIDESYM
75695+ NULL,
75696+#else
75697+ sk,
75698+#endif
75699 jiffies_to_clock_t(icsk->icsk_rto),
75700 jiffies_to_clock_t(icsk->icsk_ack.ato),
75701 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75702@@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75703 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75704 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75705 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75706- atomic_read(&tw->tw_refcnt), tw, len);
75707+ atomic_read(&tw->tw_refcnt),
75708+#ifdef CONFIG_GRKERNSEC_HIDESYM
75709+ NULL,
75710+#else
75711+ tw,
75712+#endif
75713+ len);
75714 }
75715
75716 #define TMPSZ 150
75717diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75718index 3cabafb..640525b 100644
75719--- a/net/ipv4/tcp_minisocks.c
75720+++ b/net/ipv4/tcp_minisocks.c
75721@@ -27,6 +27,10 @@
75722 #include <net/inet_common.h>
75723 #include <net/xfrm.h>
75724
75725+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75726+extern int grsec_enable_blackhole;
75727+#endif
75728+
75729 int sysctl_tcp_syncookies __read_mostly = 1;
75730 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75731
75732@@ -753,6 +757,10 @@ listen_overflow:
75733
75734 embryonic_reset:
75735 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75736+
75737+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75738+ if (!grsec_enable_blackhole)
75739+#endif
75740 if (!(flg & TCP_FLAG_RST))
75741 req->rsk_ops->send_reset(sk, skb);
75742
75743diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75744index a981cdc..48f4c3a 100644
75745--- a/net/ipv4/tcp_probe.c
75746+++ b/net/ipv4/tcp_probe.c
75747@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75748 if (cnt + width >= len)
75749 break;
75750
75751- if (copy_to_user(buf + cnt, tbuf, width))
75752+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75753 return -EFAULT;
75754 cnt += width;
75755 }
75756diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75757index 34d4a02..3b57f86 100644
75758--- a/net/ipv4/tcp_timer.c
75759+++ b/net/ipv4/tcp_timer.c
75760@@ -22,6 +22,10 @@
75761 #include <linux/gfp.h>
75762 #include <net/tcp.h>
75763
75764+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75765+extern int grsec_lastack_retries;
75766+#endif
75767+
75768 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75769 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75770 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75771@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75772 }
75773 }
75774
75775+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75776+ if ((sk->sk_state == TCP_LAST_ACK) &&
75777+ (grsec_lastack_retries > 0) &&
75778+ (grsec_lastack_retries < retry_until))
75779+ retry_until = grsec_lastack_retries;
75780+#endif
75781+
75782 if (retransmits_timed_out(sk, retry_until,
75783 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75784 /* Has it gone just too far? */
75785diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75786index fe14105..0618260 100644
75787--- a/net/ipv4/udp.c
75788+++ b/net/ipv4/udp.c
75789@@ -87,6 +87,7 @@
75790 #include <linux/types.h>
75791 #include <linux/fcntl.h>
75792 #include <linux/module.h>
75793+#include <linux/security.h>
75794 #include <linux/socket.h>
75795 #include <linux/sockios.h>
75796 #include <linux/igmp.h>
75797@@ -109,6 +110,10 @@
75798 #include <trace/events/udp.h>
75799 #include "udp_impl.h"
75800
75801+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75802+extern int grsec_enable_blackhole;
75803+#endif
75804+
75805 struct udp_table udp_table __read_mostly;
75806 EXPORT_SYMBOL(udp_table);
75807
75808@@ -567,6 +572,9 @@ found:
75809 return s;
75810 }
75811
75812+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75813+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75814+
75815 /*
75816 * This routine is called by the ICMP module when it gets some
75817 * sort of error condition. If err < 0 then the socket should
75818@@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75819 dport = usin->sin_port;
75820 if (dport == 0)
75821 return -EINVAL;
75822+
75823+ err = gr_search_udp_sendmsg(sk, usin);
75824+ if (err)
75825+ return err;
75826 } else {
75827 if (sk->sk_state != TCP_ESTABLISHED)
75828 return -EDESTADDRREQ;
75829+
75830+ err = gr_search_udp_sendmsg(sk, NULL);
75831+ if (err)
75832+ return err;
75833+
75834 daddr = inet->inet_daddr;
75835 dport = inet->inet_dport;
75836 /* Open fast path for connected socket.
75837@@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
75838 udp_lib_checksum_complete(skb)) {
75839 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75840 IS_UDPLITE(sk));
75841- atomic_inc(&sk->sk_drops);
75842+ atomic_inc_unchecked(&sk->sk_drops);
75843 __skb_unlink(skb, rcvq);
75844 __skb_queue_tail(&list_kill, skb);
75845 }
75846@@ -1188,6 +1205,10 @@ try_again:
75847 if (!skb)
75848 goto out;
75849
75850+ err = gr_search_udp_recvmsg(sk, skb);
75851+ if (err)
75852+ goto out_free;
75853+
75854 ulen = skb->len - sizeof(struct udphdr);
75855 copied = len;
75856 if (copied > ulen)
75857@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75858
75859 drop:
75860 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75861- atomic_inc(&sk->sk_drops);
75862+ atomic_inc_unchecked(&sk->sk_drops);
75863 kfree_skb(skb);
75864 return -1;
75865 }
75866@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75867 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75868
75869 if (!skb1) {
75870- atomic_inc(&sk->sk_drops);
75871+ atomic_inc_unchecked(&sk->sk_drops);
75872 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75873 IS_UDPLITE(sk));
75874 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75875@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75876 goto csum_error;
75877
75878 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75879+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75880+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75881+#endif
75882 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75883
75884 /*
75885@@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75886 sk_wmem_alloc_get(sp),
75887 sk_rmem_alloc_get(sp),
75888 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75889- atomic_read(&sp->sk_refcnt), sp,
75890- atomic_read(&sp->sk_drops), len);
75891+ atomic_read(&sp->sk_refcnt),
75892+#ifdef CONFIG_GRKERNSEC_HIDESYM
75893+ NULL,
75894+#else
75895+ sp,
75896+#endif
75897+ atomic_read_unchecked(&sp->sk_drops), len);
75898 }
75899
75900 int udp4_seq_show(struct seq_file *seq, void *v)
75901diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75902index 7d5cb97..c56564f 100644
75903--- a/net/ipv6/addrconf.c
75904+++ b/net/ipv6/addrconf.c
75905@@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75906 p.iph.ihl = 5;
75907 p.iph.protocol = IPPROTO_IPV6;
75908 p.iph.ttl = 64;
75909- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75910+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75911
75912 if (ops->ndo_do_ioctl) {
75913 mm_segment_t oldfs = get_fs();
75914diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75915index 02dd203..e03fcc9 100644
75916--- a/net/ipv6/inet6_connection_sock.c
75917+++ b/net/ipv6/inet6_connection_sock.c
75918@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75919 #ifdef CONFIG_XFRM
75920 {
75921 struct rt6_info *rt = (struct rt6_info *)dst;
75922- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75923+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75924 }
75925 #endif
75926 }
75927@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75928 #ifdef CONFIG_XFRM
75929 if (dst) {
75930 struct rt6_info *rt = (struct rt6_info *)dst;
75931- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75932+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75933 __sk_dst_reset(sk);
75934 dst = NULL;
75935 }
75936diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75937index 63dd1f8..e7f53ca 100644
75938--- a/net/ipv6/ipv6_sockglue.c
75939+++ b/net/ipv6/ipv6_sockglue.c
75940@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75941 if (sk->sk_type != SOCK_STREAM)
75942 return -ENOPROTOOPT;
75943
75944- msg.msg_control = optval;
75945+ msg.msg_control = (void __force_kernel *)optval;
75946 msg.msg_controllen = len;
75947 msg.msg_flags = flags;
75948
75949diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75950index 5bddea7..82d9d67 100644
75951--- a/net/ipv6/raw.c
75952+++ b/net/ipv6/raw.c
75953@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75954 {
75955 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75956 skb_checksum_complete(skb)) {
75957- atomic_inc(&sk->sk_drops);
75958+ atomic_inc_unchecked(&sk->sk_drops);
75959 kfree_skb(skb);
75960 return NET_RX_DROP;
75961 }
75962@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75963 struct raw6_sock *rp = raw6_sk(sk);
75964
75965 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75966- atomic_inc(&sk->sk_drops);
75967+ atomic_inc_unchecked(&sk->sk_drops);
75968 kfree_skb(skb);
75969 return NET_RX_DROP;
75970 }
75971@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75972
75973 if (inet->hdrincl) {
75974 if (skb_checksum_complete(skb)) {
75975- atomic_inc(&sk->sk_drops);
75976+ atomic_inc_unchecked(&sk->sk_drops);
75977 kfree_skb(skb);
75978 return NET_RX_DROP;
75979 }
75980@@ -602,7 +602,7 @@ out:
75981 return err;
75982 }
75983
75984-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75985+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75986 struct flowi6 *fl6, struct dst_entry **dstp,
75987 unsigned int flags)
75988 {
75989@@ -914,12 +914,15 @@ do_confirm:
75990 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75991 char __user *optval, int optlen)
75992 {
75993+ struct icmp6_filter filter;
75994+
75995 switch (optname) {
75996 case ICMPV6_FILTER:
75997 if (optlen > sizeof(struct icmp6_filter))
75998 optlen = sizeof(struct icmp6_filter);
75999- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76000+ if (copy_from_user(&filter, optval, optlen))
76001 return -EFAULT;
76002+ raw6_sk(sk)->filter = filter;
76003 return 0;
76004 default:
76005 return -ENOPROTOOPT;
76006@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76007 char __user *optval, int __user *optlen)
76008 {
76009 int len;
76010+ struct icmp6_filter filter;
76011
76012 switch (optname) {
76013 case ICMPV6_FILTER:
76014@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76015 len = sizeof(struct icmp6_filter);
76016 if (put_user(len, optlen))
76017 return -EFAULT;
76018- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76019+ filter = raw6_sk(sk)->filter;
76020+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
76021 return -EFAULT;
76022 return 0;
76023 default:
76024@@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76025 0, 0L, 0,
76026 sock_i_uid(sp), 0,
76027 sock_i_ino(sp),
76028- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76029+ atomic_read(&sp->sk_refcnt),
76030+#ifdef CONFIG_GRKERNSEC_HIDESYM
76031+ NULL,
76032+#else
76033+ sp,
76034+#endif
76035+ atomic_read_unchecked(&sp->sk_drops));
76036 }
76037
76038 static int raw6_seq_show(struct seq_file *seq, void *v)
76039diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76040index 98256cf..7f16dbd 100644
76041--- a/net/ipv6/tcp_ipv6.c
76042+++ b/net/ipv6/tcp_ipv6.c
76043@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76044 }
76045 #endif
76046
76047+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76048+extern int grsec_enable_blackhole;
76049+#endif
76050+
76051 static void tcp_v6_hash(struct sock *sk)
76052 {
76053 if (sk->sk_state != TCP_CLOSE) {
76054@@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76055 return 0;
76056
76057 reset:
76058+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76059+ if (!grsec_enable_blackhole)
76060+#endif
76061 tcp_v6_send_reset(sk, skb);
76062 discard:
76063 if (opt_skb)
76064@@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76065 TCP_SKB_CB(skb)->sacked = 0;
76066
76067 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76068- if (!sk)
76069+ if (!sk) {
76070+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76071+ ret = 1;
76072+#endif
76073 goto no_tcp_socket;
76074+ }
76075
76076 process:
76077- if (sk->sk_state == TCP_TIME_WAIT)
76078+ if (sk->sk_state == TCP_TIME_WAIT) {
76079+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76080+ ret = 2;
76081+#endif
76082 goto do_time_wait;
76083+ }
76084
76085 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76086 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76087@@ -1676,6 +1691,10 @@ no_tcp_socket:
76088 bad_packet:
76089 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76090 } else {
76091+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76092+ if (!grsec_enable_blackhole || (ret == 1 &&
76093+ (skb->dev->flags & IFF_LOOPBACK)))
76094+#endif
76095 tcp_v6_send_reset(NULL, skb);
76096 }
76097
76098@@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
76099 uid,
76100 0, /* non standard timer */
76101 0, /* open_requests have no inode */
76102- 0, req);
76103+ 0,
76104+#ifdef CONFIG_GRKERNSEC_HIDESYM
76105+ NULL
76106+#else
76107+ req
76108+#endif
76109+ );
76110 }
76111
76112 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76113@@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76114 sock_i_uid(sp),
76115 icsk->icsk_probes_out,
76116 sock_i_ino(sp),
76117- atomic_read(&sp->sk_refcnt), sp,
76118+ atomic_read(&sp->sk_refcnt),
76119+#ifdef CONFIG_GRKERNSEC_HIDESYM
76120+ NULL,
76121+#else
76122+ sp,
76123+#endif
76124 jiffies_to_clock_t(icsk->icsk_rto),
76125 jiffies_to_clock_t(icsk->icsk_ack.ato),
76126 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76127@@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76128 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76129 tw->tw_substate, 0, 0,
76130 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76131- atomic_read(&tw->tw_refcnt), tw);
76132+ atomic_read(&tw->tw_refcnt),
76133+#ifdef CONFIG_GRKERNSEC_HIDESYM
76134+ NULL
76135+#else
76136+ tw
76137+#endif
76138+ );
76139 }
76140
76141 static int tcp6_seq_show(struct seq_file *seq, void *v)
76142diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76143index 37b0699..d323408 100644
76144--- a/net/ipv6/udp.c
76145+++ b/net/ipv6/udp.c
76146@@ -50,6 +50,10 @@
76147 #include <linux/seq_file.h>
76148 #include "udp_impl.h"
76149
76150+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76151+extern int grsec_enable_blackhole;
76152+#endif
76153+
76154 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76155 {
76156 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76157@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76158
76159 return 0;
76160 drop:
76161- atomic_inc(&sk->sk_drops);
76162+ atomic_inc_unchecked(&sk->sk_drops);
76163 drop_no_sk_drops_inc:
76164 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76165 kfree_skb(skb);
76166@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76167 continue;
76168 }
76169 drop:
76170- atomic_inc(&sk->sk_drops);
76171+ atomic_inc_unchecked(&sk->sk_drops);
76172 UDP6_INC_STATS_BH(sock_net(sk),
76173 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76174 UDP6_INC_STATS_BH(sock_net(sk),
76175@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76176 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76177 proto == IPPROTO_UDPLITE);
76178
76179+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76180+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76181+#endif
76182 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76183
76184 kfree_skb(skb);
76185@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76186 if (!sock_owned_by_user(sk))
76187 udpv6_queue_rcv_skb(sk, skb);
76188 else if (sk_add_backlog(sk, skb)) {
76189- atomic_inc(&sk->sk_drops);
76190+ atomic_inc_unchecked(&sk->sk_drops);
76191 bh_unlock_sock(sk);
76192 sock_put(sk);
76193 goto discard;
76194@@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76195 0, 0L, 0,
76196 sock_i_uid(sp), 0,
76197 sock_i_ino(sp),
76198- atomic_read(&sp->sk_refcnt), sp,
76199- atomic_read(&sp->sk_drops));
76200+ atomic_read(&sp->sk_refcnt),
76201+#ifdef CONFIG_GRKERNSEC_HIDESYM
76202+ NULL,
76203+#else
76204+ sp,
76205+#endif
76206+ atomic_read_unchecked(&sp->sk_drops));
76207 }
76208
76209 int udp6_seq_show(struct seq_file *seq, void *v)
76210diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76211index 6b9d5a0..4dffaf1 100644
76212--- a/net/irda/ircomm/ircomm_tty.c
76213+++ b/net/irda/ircomm/ircomm_tty.c
76214@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76215 add_wait_queue(&self->open_wait, &wait);
76216
76217 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76218- __FILE__,__LINE__, tty->driver->name, self->open_count );
76219+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76220
76221 /* As far as I can see, we protect open_count - Jean II */
76222 spin_lock_irqsave(&self->spinlock, flags);
76223 if (!tty_hung_up_p(filp)) {
76224 extra_count = 1;
76225- self->open_count--;
76226+ local_dec(&self->open_count);
76227 }
76228 spin_unlock_irqrestore(&self->spinlock, flags);
76229- self->blocked_open++;
76230+ local_inc(&self->blocked_open);
76231
76232 while (1) {
76233 if (tty->termios->c_cflag & CBAUD) {
76234@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76235 }
76236
76237 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76238- __FILE__,__LINE__, tty->driver->name, self->open_count );
76239+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76240
76241 schedule();
76242 }
76243@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76244 if (extra_count) {
76245 /* ++ is not atomic, so this should be protected - Jean II */
76246 spin_lock_irqsave(&self->spinlock, flags);
76247- self->open_count++;
76248+ local_inc(&self->open_count);
76249 spin_unlock_irqrestore(&self->spinlock, flags);
76250 }
76251- self->blocked_open--;
76252+ local_dec(&self->blocked_open);
76253
76254 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76255- __FILE__,__LINE__, tty->driver->name, self->open_count);
76256+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76257
76258 if (!retval)
76259 self->flags |= ASYNC_NORMAL_ACTIVE;
76260@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76261 }
76262 /* ++ is not atomic, so this should be protected - Jean II */
76263 spin_lock_irqsave(&self->spinlock, flags);
76264- self->open_count++;
76265+ local_inc(&self->open_count);
76266
76267 tty->driver_data = self;
76268 self->tty = tty;
76269 spin_unlock_irqrestore(&self->spinlock, flags);
76270
76271 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76272- self->line, self->open_count);
76273+ self->line, local_read(&self->open_count));
76274
76275 /* Not really used by us, but lets do it anyway */
76276 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76277@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76278 return;
76279 }
76280
76281- if ((tty->count == 1) && (self->open_count != 1)) {
76282+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76283 /*
76284 * Uh, oh. tty->count is 1, which means that the tty
76285 * structure will be freed. state->count should always
76286@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76287 */
76288 IRDA_DEBUG(0, "%s(), bad serial port count; "
76289 "tty->count is 1, state->count is %d\n", __func__ ,
76290- self->open_count);
76291- self->open_count = 1;
76292+ local_read(&self->open_count));
76293+ local_set(&self->open_count, 1);
76294 }
76295
76296- if (--self->open_count < 0) {
76297+ if (local_dec_return(&self->open_count) < 0) {
76298 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76299- __func__, self->line, self->open_count);
76300- self->open_count = 0;
76301+ __func__, self->line, local_read(&self->open_count));
76302+ local_set(&self->open_count, 0);
76303 }
76304- if (self->open_count) {
76305+ if (local_read(&self->open_count)) {
76306 spin_unlock_irqrestore(&self->spinlock, flags);
76307
76308 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76309@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76310 tty->closing = 0;
76311 self->tty = NULL;
76312
76313- if (self->blocked_open) {
76314+ if (local_read(&self->blocked_open)) {
76315 if (self->close_delay)
76316 schedule_timeout_interruptible(self->close_delay);
76317 wake_up_interruptible(&self->open_wait);
76318@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76319 spin_lock_irqsave(&self->spinlock, flags);
76320 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76321 self->tty = NULL;
76322- self->open_count = 0;
76323+ local_set(&self->open_count, 0);
76324 spin_unlock_irqrestore(&self->spinlock, flags);
76325
76326 wake_up_interruptible(&self->open_wait);
76327@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76328 seq_putc(m, '\n');
76329
76330 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76331- seq_printf(m, "Open count: %d\n", self->open_count);
76332+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76333 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76334 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76335
76336diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76337index cd6f7a9..e63fe89 100644
76338--- a/net/iucv/af_iucv.c
76339+++ b/net/iucv/af_iucv.c
76340@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
76341
76342 write_lock_bh(&iucv_sk_list.lock);
76343
76344- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76345+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76346 while (__iucv_get_sock_by_name(name)) {
76347 sprintf(name, "%08x",
76348- atomic_inc_return(&iucv_sk_list.autobind_name));
76349+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76350 }
76351
76352 write_unlock_bh(&iucv_sk_list.lock);
76353diff --git a/net/key/af_key.c b/net/key/af_key.c
76354index 7e5d927..cdbb54e 100644
76355--- a/net/key/af_key.c
76356+++ b/net/key/af_key.c
76357@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76358 static u32 get_acqseq(void)
76359 {
76360 u32 res;
76361- static atomic_t acqseq;
76362+ static atomic_unchecked_t acqseq;
76363
76364 do {
76365- res = atomic_inc_return(&acqseq);
76366+ res = atomic_inc_return_unchecked(&acqseq);
76367 } while (!res);
76368 return res;
76369 }
76370diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76371index db8fae5..ff070cd 100644
76372--- a/net/mac80211/ieee80211_i.h
76373+++ b/net/mac80211/ieee80211_i.h
76374@@ -28,6 +28,7 @@
76375 #include <net/ieee80211_radiotap.h>
76376 #include <net/cfg80211.h>
76377 #include <net/mac80211.h>
76378+#include <asm/local.h>
76379 #include "key.h"
76380 #include "sta_info.h"
76381
76382@@ -842,7 +843,7 @@ struct ieee80211_local {
76383 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76384 spinlock_t queue_stop_reason_lock;
76385
76386- int open_count;
76387+ local_t open_count;
76388 int monitors, cooked_mntrs;
76389 /* number of interfaces with corresponding FIF_ flags */
76390 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76391diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76392index 48f937e..4ccd7b8 100644
76393--- a/net/mac80211/iface.c
76394+++ b/net/mac80211/iface.c
76395@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76396 break;
76397 }
76398
76399- if (local->open_count == 0) {
76400+ if (local_read(&local->open_count) == 0) {
76401 res = drv_start(local);
76402 if (res)
76403 goto err_del_bss;
76404@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76405 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76406
76407 if (!is_valid_ether_addr(dev->dev_addr)) {
76408- if (!local->open_count)
76409+ if (!local_read(&local->open_count))
76410 drv_stop(local);
76411 return -EADDRNOTAVAIL;
76412 }
76413@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76414 mutex_unlock(&local->mtx);
76415
76416 if (coming_up)
76417- local->open_count++;
76418+ local_inc(&local->open_count);
76419
76420 if (hw_reconf_flags)
76421 ieee80211_hw_config(local, hw_reconf_flags);
76422@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76423 err_del_interface:
76424 drv_remove_interface(local, sdata);
76425 err_stop:
76426- if (!local->open_count)
76427+ if (!local_read(&local->open_count))
76428 drv_stop(local);
76429 err_del_bss:
76430 sdata->bss = NULL;
76431@@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76432 }
76433
76434 if (going_down)
76435- local->open_count--;
76436+ local_dec(&local->open_count);
76437
76438 switch (sdata->vif.type) {
76439 case NL80211_IFTYPE_AP_VLAN:
76440@@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76441
76442 ieee80211_recalc_ps(local, -1);
76443
76444- if (local->open_count == 0) {
76445+ if (local_read(&local->open_count) == 0) {
76446 if (local->ops->napi_poll)
76447 napi_disable(&local->napi);
76448 ieee80211_clear_tx_pending(local);
76449diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76450index 1633648..d45ebfa 100644
76451--- a/net/mac80211/main.c
76452+++ b/net/mac80211/main.c
76453@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76454 local->hw.conf.power_level = power;
76455 }
76456
76457- if (changed && local->open_count) {
76458+ if (changed && local_read(&local->open_count)) {
76459 ret = drv_config(local, changed);
76460 /*
76461 * Goal:
76462diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76463index ef8eba1..5c63952 100644
76464--- a/net/mac80211/pm.c
76465+++ b/net/mac80211/pm.c
76466@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76467 struct ieee80211_sub_if_data *sdata;
76468 struct sta_info *sta;
76469
76470- if (!local->open_count)
76471+ if (!local_read(&local->open_count))
76472 goto suspend;
76473
76474 ieee80211_scan_cancel(local);
76475@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76476 cancel_work_sync(&local->dynamic_ps_enable_work);
76477 del_timer_sync(&local->dynamic_ps_timer);
76478
76479- local->wowlan = wowlan && local->open_count;
76480+ local->wowlan = wowlan && local_read(&local->open_count);
76481 if (local->wowlan) {
76482 int err = drv_suspend(local, wowlan);
76483 if (err < 0) {
76484@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76485 }
76486
76487 /* stop hardware - this must stop RX */
76488- if (local->open_count)
76489+ if (local_read(&local->open_count))
76490 ieee80211_stop_device(local);
76491
76492 suspend:
76493diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76494index 3313c11..bec9f17 100644
76495--- a/net/mac80211/rate.c
76496+++ b/net/mac80211/rate.c
76497@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76498
76499 ASSERT_RTNL();
76500
76501- if (local->open_count)
76502+ if (local_read(&local->open_count))
76503 return -EBUSY;
76504
76505 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76506diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76507index c97a065..ff61928 100644
76508--- a/net/mac80211/rc80211_pid_debugfs.c
76509+++ b/net/mac80211/rc80211_pid_debugfs.c
76510@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76511
76512 spin_unlock_irqrestore(&events->lock, status);
76513
76514- if (copy_to_user(buf, pb, p))
76515+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76516 return -EFAULT;
76517
76518 return p;
76519diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76520index eb9d7c0..d34b832 100644
76521--- a/net/mac80211/util.c
76522+++ b/net/mac80211/util.c
76523@@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76524 }
76525 #endif
76526 /* everything else happens only if HW was up & running */
76527- if (!local->open_count)
76528+ if (!local_read(&local->open_count))
76529 goto wake_up;
76530
76531 /*
76532diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76533index 0c6f67e..d02cdfc 100644
76534--- a/net/netfilter/Kconfig
76535+++ b/net/netfilter/Kconfig
76536@@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
76537
76538 To compile it as a module, choose M here. If unsure, say N.
76539
76540+config NETFILTER_XT_MATCH_GRADM
76541+ tristate '"gradm" match support'
76542+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76543+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76544+ ---help---
76545+ The gradm match allows to match on grsecurity RBAC being enabled.
76546+ It is useful when iptables rules are applied early on bootup to
76547+ prevent connections to the machine (except from a trusted host)
76548+ while the RBAC system is disabled.
76549+
76550 config NETFILTER_XT_MATCH_HASHLIMIT
76551 tristate '"hashlimit" match support'
76552 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76553diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76554index ca36765..0882e7c 100644
76555--- a/net/netfilter/Makefile
76556+++ b/net/netfilter/Makefile
76557@@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76558 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76559 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76560 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76561+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76562 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76563 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76564 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76565diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76566index 29fa5ba..8debc79 100644
76567--- a/net/netfilter/ipvs/ip_vs_conn.c
76568+++ b/net/netfilter/ipvs/ip_vs_conn.c
76569@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76570 /* Increase the refcnt counter of the dest */
76571 atomic_inc(&dest->refcnt);
76572
76573- conn_flags = atomic_read(&dest->conn_flags);
76574+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
76575 if (cp->protocol != IPPROTO_UDP)
76576 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76577 /* Bind with the destination and its corresponding transmitter */
76578@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76579 atomic_set(&cp->refcnt, 1);
76580
76581 atomic_set(&cp->n_control, 0);
76582- atomic_set(&cp->in_pkts, 0);
76583+ atomic_set_unchecked(&cp->in_pkts, 0);
76584
76585 atomic_inc(&ipvs->conn_count);
76586 if (flags & IP_VS_CONN_F_NO_CPORT)
76587@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76588
76589 /* Don't drop the entry if its number of incoming packets is not
76590 located in [0, 8] */
76591- i = atomic_read(&cp->in_pkts);
76592+ i = atomic_read_unchecked(&cp->in_pkts);
76593 if (i > 8 || i < 0) return 0;
76594
76595 if (!todrop_rate[i]) return 0;
76596diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76597index 00bdb1d..6725a48 100644
76598--- a/net/netfilter/ipvs/ip_vs_core.c
76599+++ b/net/netfilter/ipvs/ip_vs_core.c
76600@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76601 ret = cp->packet_xmit(skb, cp, pd->pp);
76602 /* do not touch skb anymore */
76603
76604- atomic_inc(&cp->in_pkts);
76605+ atomic_inc_unchecked(&cp->in_pkts);
76606 ip_vs_conn_put(cp);
76607 return ret;
76608 }
76609@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76610 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76611 pkts = sysctl_sync_threshold(ipvs);
76612 else
76613- pkts = atomic_add_return(1, &cp->in_pkts);
76614+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76615
76616 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76617 cp->protocol == IPPROTO_SCTP) {
76618diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76619index f558998..7dfb054 100644
76620--- a/net/netfilter/ipvs/ip_vs_ctl.c
76621+++ b/net/netfilter/ipvs/ip_vs_ctl.c
76622@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76623 ip_vs_rs_hash(ipvs, dest);
76624 write_unlock_bh(&ipvs->rs_lock);
76625 }
76626- atomic_set(&dest->conn_flags, conn_flags);
76627+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
76628
76629 /* bind the service */
76630 if (!dest->svc) {
76631@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76632 {
76633 struct net_device *dev = ptr;
76634 struct net *net = dev_net(dev);
76635+ struct netns_ipvs *ipvs = net_ipvs(net);
76636 struct ip_vs_service *svc;
76637 struct ip_vs_dest *dest;
76638 unsigned int idx;
76639
76640- if (event != NETDEV_UNREGISTER)
76641+ if (event != NETDEV_UNREGISTER || !ipvs)
76642 return NOTIFY_DONE;
76643 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
76644 EnterFunction(2);
76645@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76646 }
76647 }
76648
76649- list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
76650+ list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
76651 __ip_vs_dev_reset(dest, dev);
76652 }
76653 mutex_unlock(&__ip_vs_mutex);
76654@@ -2028,7 +2029,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76655 " %-7s %-6d %-10d %-10d\n",
76656 &dest->addr.in6,
76657 ntohs(dest->port),
76658- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76659+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76660 atomic_read(&dest->weight),
76661 atomic_read(&dest->activeconns),
76662 atomic_read(&dest->inactconns));
76663@@ -2039,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76664 "%-7s %-6d %-10d %-10d\n",
76665 ntohl(dest->addr.ip),
76666 ntohs(dest->port),
76667- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76668+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76669 atomic_read(&dest->weight),
76670 atomic_read(&dest->activeconns),
76671 atomic_read(&dest->inactconns));
76672@@ -2509,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76673
76674 entry.addr = dest->addr.ip;
76675 entry.port = dest->port;
76676- entry.conn_flags = atomic_read(&dest->conn_flags);
76677+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76678 entry.weight = atomic_read(&dest->weight);
76679 entry.u_threshold = dest->u_threshold;
76680 entry.l_threshold = dest->l_threshold;
76681@@ -3042,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76682 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76683
76684 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76685- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76686+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76687 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76688 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76689 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76690diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76691index 8a0d6d6..90ec197 100644
76692--- a/net/netfilter/ipvs/ip_vs_sync.c
76693+++ b/net/netfilter/ipvs/ip_vs_sync.c
76694@@ -649,7 +649,7 @@ control:
76695 * i.e only increment in_pkts for Templates.
76696 */
76697 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76698- int pkts = atomic_add_return(1, &cp->in_pkts);
76699+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76700
76701 if (pkts % sysctl_sync_period(ipvs) != 1)
76702 return;
76703@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76704
76705 if (opt)
76706 memcpy(&cp->in_seq, opt, sizeof(*opt));
76707- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76708+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76709 cp->state = state;
76710 cp->old_state = cp->state;
76711 /*
76712diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76713index 7fd66de..e6fb361 100644
76714--- a/net/netfilter/ipvs/ip_vs_xmit.c
76715+++ b/net/netfilter/ipvs/ip_vs_xmit.c
76716@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76717 else
76718 rc = NF_ACCEPT;
76719 /* do not touch skb anymore */
76720- atomic_inc(&cp->in_pkts);
76721+ atomic_inc_unchecked(&cp->in_pkts);
76722 goto out;
76723 }
76724
76725@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76726 else
76727 rc = NF_ACCEPT;
76728 /* do not touch skb anymore */
76729- atomic_inc(&cp->in_pkts);
76730+ atomic_inc_unchecked(&cp->in_pkts);
76731 goto out;
76732 }
76733
76734diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76735index 66b2c54..c7884e3 100644
76736--- a/net/netfilter/nfnetlink_log.c
76737+++ b/net/netfilter/nfnetlink_log.c
76738@@ -70,7 +70,7 @@ struct nfulnl_instance {
76739 };
76740
76741 static DEFINE_SPINLOCK(instances_lock);
76742-static atomic_t global_seq;
76743+static atomic_unchecked_t global_seq;
76744
76745 #define INSTANCE_BUCKETS 16
76746 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76747@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76748 /* global sequence number */
76749 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76750 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76751- htonl(atomic_inc_return(&global_seq)));
76752+ htonl(atomic_inc_return_unchecked(&global_seq)));
76753
76754 if (data_len) {
76755 struct nlattr *nla;
76756diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76757new file mode 100644
76758index 0000000..6905327
76759--- /dev/null
76760+++ b/net/netfilter/xt_gradm.c
76761@@ -0,0 +1,51 @@
76762+/*
76763+ * gradm match for netfilter
76764